diff --git a/.asf.yaml b/.asf.yaml index 9edce0b5eb2..94c18f0c581 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. github: - description: "Scalable, redundant, and distributed object store for Apache Hadoop" + description: "Scalable, reliable, distributed storage system optimized for data analytics and object store workloads." homepage: https://ozone.apache.org labels: - hadoop diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd0a12edd93..4c6723daff8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,8 +28,14 @@ on: required: false env: FAIL_FAST: ${{ github.event_name == 'pull_request' }} + # Minimum required Java version for running Ozone is defined in pom.xml (javac.version). + TEST_JAVA_VERSION: 21 # JDK version used by CI build and tests; should match the JDK version in apache/ozone-runner image + MAVEN_ARGS: --batch-mode --settings ${{ github.workspace }}/dev-support/ci/maven-settings.xml --show-version MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 - OZONE_WITH_COVERAGE: ${{ github.repository == 'apache/ozone' && github.event_name == 'push' }} + HADOOP_IMAGE: ghcr.io/apache/hadoop + OZONE_IMAGE: ghcr.io/apache/ozone + OZONE_RUNNER_IMAGE: ghcr.io/apache/ozone-runner + OZONE_WITH_COVERAGE: ${{ github.event_name == 'push' }} jobs: build-info: runs-on: ubuntu-20.04 @@ -102,10 +108,6 @@ jobs: runs-on: ubuntu-20.04 timeout-minutes: 60 if: needs.build-info.outputs.needs-build == 'true' - strategy: - matrix: - java: [ 8 ] - fail-fast: false steps: - name: Checkout project uses: actions/checkout@v4 @@ -136,11 +138,11 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: ${{ matrix.java }} + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Run a full build run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }} env: @@ -155,6 +157,7 @@ jobs: retention-days: 1 - name: Store source tarball for compilation uses: actions/upload-artifact@v4 + if: needs.build-info.outputs.needs-compile == 'true' with: name: ozone-src path: hadoop-ozone/dist/target/ozone-*-src.tar.gz @@ -171,6 +174,8 @@ jobs: - build-info - build - basic + - dependency + - license timeout-minutes: 45 if: needs.build-info.outputs.needs-compile == 'true' strategy: @@ -179,7 +184,7 @@ jobs: include: - os: ubuntu-20.04 - java: 8 - os: macos-12 + os: macos-13 fail-fast: false runs-on: ${{ matrix.os }} steps: @@ -212,13 +217,13 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ matrix.java }} uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ matrix.java }} - name: Compile Ozone using Java ${{ matrix.java }} - run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Dskip.npx -Dskip.installnpx -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} + run: hadoop-ozone/dev-support/checks/build.sh -Pdist -DskipRecon -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} env: OZONE_WITH_COVERAGE: false DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} @@ -253,7 +258,7 @@ jobs: key: maven-repo-${{ hashFiles('**/pom.xml') }} 
restore-keys: | maven-repo- - if: ${{ !contains('author,bats,docs', matrix.check) }} + if: ${{ !contains('author,bats', matrix.check) }} - name: Download Ratis repo if: ${{ inputs.ratis_args != '' }} uses: actions/download-artifact@v4 @@ -261,19 +266,18 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java 8 uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 - name: Execute tests run: hadoop-ozone/dev-support/checks/${{ matrix.check }}.sh ${{ inputs.ratis_args }} - continue-on-error: true env: DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} @@ -309,19 +313,18 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 8 + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests run: hadoop-ozone/dev-support/checks/${{ github.job }}.sh ${{ inputs.ratis_args }} - continue-on-error: true env: DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} @@ -341,6 +344,15 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ needs.build-info.outputs.sha }} + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- - name: Download compiled Ozone binaries uses: actions/download-artifact@v4 with: @@ -388,13 +400,76 @@ jobs: name: ozone-repo path: | ~/.m2/repository/org/apache/ozone + - name: Setup java ${{ env.TEST_JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests run: | hadoop-ozone/dev-support/checks/${{ github.job }}.sh + - name: Summary of failures + run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt + if: ${{ failure() }} + - name: Archive build results + uses: actions/upload-artifact@v4 + if: always() + with: + name: ${{ github.job }} + path: target/${{ github.job }} continue-on-error: true + repro: + needs: + - build-info + - build + runs-on: ubuntu-20.04 + timeout-minutes: 30 + steps: + - name: Checkout project + uses: actions/checkout@v4 + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- + - name: Download Ozone repo + id: download-ozone-repo + uses: actions/download-artifact@v4 + with: + name: ozone-repo + path: | + ~/.m2/repository/org/apache/ozone + - name: Download Ratis repo + if: ${{ inputs.ratis_args != '' }} + uses: actions/download-artifact@v4 + with: + name: ratis-jars + path: | + ~/.m2/repository/org/apache/ratis + - name: Setup java ${{ env.TEST_JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: ${{ env.TEST_JAVA_VERSION }} + - 
name: Execute tests + run: | + hadoop-ozone/dev-support/checks/${{ github.job }}.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} + - name: Install diffoscope + run: | + sudo apt update -q + sudo apt install -y diffoscope + if: ${{ failure() }} + - name: Check artifact differences + run: | + hadoop-ozone/dev-support/checks/_diffoscope.sh + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: always() @@ -407,6 +482,8 @@ jobs: - build-info - build - basic + - dependency + - license runs-on: ubuntu-20.04 timeout-minutes: 150 if: needs.build-info.outputs.needs-compose-tests == 'true' @@ -419,6 +496,15 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ needs.build-info.outputs.sha }} + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- - name: Download compiled Ozone binaries uses: actions/download-artifact@v4 with: @@ -438,10 +524,9 @@ jobs: KEEP_IMAGE: false OZONE_ACCEPTANCE_SUITE: ${{ matrix.suite }} OZONE_VOLUME_OWNER: 1000 - continue-on-error: true - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: always() @@ -454,6 +539,8 @@ jobs: - build-info - build - basic + - dependency + - license runs-on: ubuntu-20.04 timeout-minutes: 60 if: needs.build-info.outputs.needs-kubernetes-tests == 'true' @@ -462,6 +549,15 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ needs.build-info.outputs.sha }} + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- - name: Download compiled Ozone binaries uses: actions/download-artifact@v4 with: @@ -476,10 +572,9 @@ jobs: sudo mkdir .aws && sudo chmod 777 .aws && sudo chown 1000 .aws popd ./hadoop-ozone/dev-support/checks/kubernetes.sh - continue-on-error: true - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: always() @@ -529,25 +624,31 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 17 + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests - continue-on-error: true run: | args="${{ inputs.ratis_args }}" if [[ "${{ matrix.profile }}" == "flaky" ]]; then args="$args -Dsurefire.rerunFailingTestsCount=5 -Dsurefire.fork.timeout=3600" fi + if [[ "${{ matrix.profile }}" != "filesystem" ]]; then + args="$args -DskipShade" + fi hadoop-ozone/dev-support/checks/integration.sh -P${{ matrix.profile }} ${args} env: DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures - run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + run: | + if [[ -s "target/${{ github.job }}/summary.md" ]]; then + cat 
target/${{ github.job }}/summary.md >> $GITHUB_STEP_SUMMARY + fi + hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: always() @@ -558,7 +659,7 @@ jobs: coverage: runs-on: ubuntu-20.04 timeout-minutes: 30 - if: github.repository == 'apache/ozone' && github.event_name == 'push' + if: github.event_name == 'push' needs: - build-info - acceptance @@ -587,15 +688,16 @@ jobs: run: | mkdir -p hadoop-ozone/dist/target tar xzvf target/artifacts/ozone-bin/ozone*.tar.gz -C hadoop-ozone/dist/target - - name: Calculate combined coverage - run: ./hadoop-ozone/dev-support/checks/coverage.sh - - name: Setup java 17 + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 17 + java-version: ${{ env.TEST_JAVA_VERSION }} + - name: Calculate combined coverage + run: ./hadoop-ozone/dev-support/checks/coverage.sh - name: Upload coverage to Sonar run: ./hadoop-ozone/dev-support/checks/sonar.sh + if: github.repository == 'apache/ozone' env: SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index 5de5654aced..cb765f36217 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -115,7 +115,7 @@ jobs: java-version: 8 - name: Build (most) of Ozone run: | - args="-Dskip.npx -Dskip.installnpx -DskipShade -Dmaven.javadoc.skip=true" + args="-DskipRecon -DskipShade -Dmaven.javadoc.skip=true" if [[ "${{ github.event.inputs.ratis-ref }}" != "" ]]; then args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}" args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}" @@ -183,7 +183,7 @@ jobs: export OZONE_REPO_CACHED=true fi - args="-DexcludedGroups=native|slow|unhealthy" + args="-DexcludedGroups=native|slow|unhealthy -DskipShade" if [[ "${{ github.event.inputs.ratis-ref }}" != "" ]]; then args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}" args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}" diff --git a/.github/workflows/populate-cache.yml b/.github/workflows/populate-cache.yml index cc93390a5bc..94f2ccfe52d 100644 --- a/.github/workflows/populate-cache.yml +++ b/.github/workflows/populate-cache.yml @@ -26,8 +26,7 @@ on: - 'pom.xml' - '**/pom.xml' - '.github/workflows/populate-cache.yml' - schedule: - - cron: '20 3 * * *' + workflow_call: workflow_dispatch: jobs: @@ -74,7 +73,7 @@ jobs: - name: Fetch dependencies if: steps.restore-cache.outputs.cache-hit != 'true' - run: mvn --batch-mode --no-transfer-progress --show-version -Pgo-offline -Pdist clean verify + run: mvn --batch-mode --no-transfer-progress --show-version -Pgo-offline -Pdist -Drocks_tools_native clean verify - name: Delete Ozone jars from repo if: steps.restore-cache.outputs.cache-hit != 'true' diff --git a/.github/workflows/scheduled-cache-update.yml b/.github/workflows/scheduled-cache-update.yml new file mode 100644 index 00000000000..94ac45e785e --- /dev/null +++ b/.github/workflows/scheduled-cache-update.yml @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow periodically updates dependency cache. + +name: scheduled-cache-update + +on: + schedule: + - cron: '20 3 * * *' + +jobs: + update: + uses: ./.github/workflows/populate-cache.yml + secrets: inherit diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index ac1c913fd50..549a1cddcd3 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -24,11 +24,11 @@ com.gradle develocity-maven-extension - 1.21.5 + 1.23 com.gradle common-custom-user-data-maven-extension - 2.0 + 2.0.1 diff --git a/NOTICE.txt b/NOTICE.txt index 7a1e855f6a3..cc4e3c58b39 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,5 @@ Apache Ozone -Copyright 2022 The Apache Software Foundation +Copyright 2024 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/SECURITY.md b/SECURITY.md index 3a89968026a..580f1862c7f 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,19 +2,7 @@ ## Supported Versions -The first stable release of Apache Ozone is 1.0, the previous alpha and beta releases are not supported by the community. - -| Version | Supported | -|---------------| ------------------ | -| 0.3.0 (alpha) | :x: | -| 0.4.0 (alpha) | :x: | -| 0.4.1 (alpha) | :x: | -| 0.5.0 (beta) | :x: | -| 1.0.0 | :x: | -| 1.1.0 | :x: | -| 1.2.1 | :x: | -| 1.3.0 | :x: | -| 1.4.0 | :white_check_mark: | +Please check the Apache Ozone [website](https://ozone.apache.org/downloads/) for the list of versions currently supported. 
## Reporting a Vulnerability diff --git a/dev-support/ci/maven-settings.xml b/dev-support/ci/maven-settings.xml new file mode 100644 index 00000000000..43fa07bb52b --- /dev/null +++ b/dev-support/ci/maven-settings.xml @@ -0,0 +1,35 @@ + + + + + + block-snapshots1 + apache.snapshots + Block access to Apache Snapshots + https://repository.apache.org/snapshots + true + + + block-snapshots2 + apache.snapshots.https + Block access to Apache Snapshots + https://repository.apache.org/content/repositories/snapshots + true + + + diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index a95a981bdd3..6edd38d68fe 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -52,7 +52,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -76,7 +76,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -100,7 +100,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -112,7 +112,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -136,7 +136,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -148,7 +148,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -160,7 +160,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -172,7 +172,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -184,7 
+184,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -196,7 +196,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -208,7 +208,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -232,7 +232,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -244,7 +244,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -256,7 +256,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -268,7 +268,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -429,3 +429,15 @@ load bats-assert/load.bash assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } + +@test "properties file in resources" { + run dev-support/ci/selective_ci_checks.sh 71b8bdd8becf72d6f7d4e7986895504b8259b3e5 + + assert_output -p 'basic-checks=["rat","checkstyle","native"]' + assert_output -p needs-build=false + assert_output -p needs-compile=false + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=true + assert_output -p needs-kubernetes-tests=false +} diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index e512b4a5d62..869d36fc6cc 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -330,6 +330,7 @@ function check_needs_compile() { if [[ ${match_count} != "0" ]]; then compile_needed=true + dependency_check_needed=true fi start_end::group_end @@ -373,6 +374,7 @@ function check_needs_checkstyle() { "^hadoop-hdds/dev-support/checkstyle" "pom.xml" "src/..../java" + 
"src/..../resources/.*\.properties" ) local ignore_array=( "^hadoop-ozone/dist" @@ -519,6 +521,7 @@ function calculate_test_types_to_run() { echo "Looks like ${COUNT_CORE_OTHER_CHANGED_FILES} core files changed, running all tests." echo compose_tests_needed=true + dependency_check_needed=true integration_tests_needed=true kubernetes_tests_needed=true else @@ -526,12 +529,14 @@ function calculate_test_types_to_run() { echo if [[ ${COUNT_COMPOSE_CHANGED_FILES} != "0" ]] || [[ ${COUNT_ROBOT_CHANGED_FILES} != "0" ]]; then compose_tests_needed="true" + dependency_check_needed=true fi if [[ ${COUNT_INTEGRATION_CHANGED_FILES} != "0" ]]; then integration_tests_needed="true" fi if [[ ${COUNT_KUBERNETES_CHANGED_FILES} != "0" ]] || [[ ${COUNT_ROBOT_CHANGED_FILES} != "0" ]]; then kubernetes_tests_needed="true" + dependency_check_needed=true fi fi start_end::group_end @@ -589,6 +594,7 @@ get_count_robot_files get_count_misc_files check_needs_build +check_needs_dependency check_needs_compile # calculate basic checks to run @@ -596,7 +602,6 @@ BASIC_CHECKS="rat" check_needs_author check_needs_bats check_needs_checkstyle -check_needs_dependency check_needs_docs check_needs_findbugs check_needs_native diff --git a/dev-support/pom.xml b/dev-support/pom.xml new file mode 100644 index 00000000000..ed656fcc20a --- /dev/null +++ b/dev-support/pom.xml @@ -0,0 +1,96 @@ + + + + 4.0.0 + + org.apache.ozone + ozone-main + 2.0.0-SNAPSHOT + + ozone-dev-support + Apache Ozone Dev Support + Helper module for sharing resources among projects + + + false + + + + + META-INF + ${project.build.directory}/extra-resources + + LICENSE.txt + NOTICE.txt + + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + org.apache.maven.plugins + maven-resources-plugin + + + copy-resources + + copy-resources + + validate + + ${project.build.directory}/extra-resources + + + ../ + + LICENSE.txt + NOTICE.txt + + + + + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + ${project.build.outputDirectory} + + META-INF/LICENSE.txt + META-INF/NOTICE.txt + + + + + + bundle + + process-resources + + + + + + diff --git a/hadoop-hdds/annotations/pom.xml b/hadoop-hdds/annotations/pom.xml index 3bb148d5c25..0a961087040 100644 --- a/hadoop-hdds/annotations/pom.xml +++ b/hadoop-hdds/annotations/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-annotation-processing - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone annotation processing tools for validating custom annotations at compile time. 
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index bf728403cb4..333b960fc24 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Client Library Apache Ozone HDDS Client jar diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java index 19a5a9cad5d..f6367b5a53a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java @@ -28,7 +28,9 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.util.MetricUtil; import java.util.Map; import java.util.UUID; @@ -52,6 +54,21 @@ public final class ContainerClientMetrics { private MutableCounterLong totalWriteChunkCalls; @Metric private MutableCounterLong totalWriteChunkBytes; + + @Metric + private MutableRate hsyncSynchronizedWorkNs; + @Metric + private MutableRate hsyncSendWriteChunkNs; + @Metric + private MutableRate hsyncWaitForFlushNs; + @Metric + private MutableRate hsyncWatchForCommitNs; + @Metric + private MutableCounterLong writeChunksDuringWrite; + @Metric + private MutableCounterLong flushesDuringWrite; + + private MutableQuantiles[] listBlockLatency; private MutableQuantiles[] getBlockLatency; private MutableQuantiles[] getCommittedBlockLengthLatency; @@ -82,6 +99,7 @@ public static synchronized void release() { } referenceCount--; if (referenceCount == 0) { + instance.stop(); DefaultMetricsSystem.instance().unregisterSource( SOURCE_NAME + instanceCount); instance = null; @@ -140,6 +158,17 @@ private ContainerClientMetrics() { } } + public void stop() { + MetricUtil.stop(listBlockLatency); + MetricUtil.stop(getBlockLatency); + MetricUtil.stop(getCommittedBlockLengthLatency); + MetricUtil.stop(readChunkLatency); + MetricUtil.stop(getSmallFileLatency); + MetricUtil.stop(hsyncLatencyNs); + MetricUtil.stop(omHsyncLatencyNs); + MetricUtil.stop(datanodeHsyncLatencyNs); + } + public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { writeChunkCallsByPipeline.computeIfAbsent(pipeline.getId(), pipelineID -> registry.newCounter( @@ -249,4 +278,28 @@ Map getWriteChunkCallsByPipeline() { Map getWriteChunksCallsByLeaders() { return writeChunksCallsByLeaders; } + + public MutableRate getHsyncSynchronizedWorkNs() { + return hsyncSynchronizedWorkNs; + } + + public MutableRate getHsyncSendWriteChunkNs() { + return hsyncSendWriteChunkNs; + } + + public MutableRate getHsyncWaitForFlushNs() { + return hsyncWaitForFlushNs; + } + + public MutableRate getHsyncWatchForCommitNs() { + return hsyncWatchForCommitNs; + } + + public MutableCounterLong getWriteChunksDuringWrite() { + return writeChunksDuringWrite; + } + + public MutableCounterLong getFlushesDuringWrite() { + return flushesDuringWrite; + } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 
873f6f67348..a4b53a80a1e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -156,7 +156,7 @@ public enum ChecksumCombineMode { description = "Indicates the time duration in seconds a client will wait " + "before retrying a read key request on encountering " - + "a connectivity excepetion from Datanodes . " + + "a connectivity exception from Datanodes. " + "By default the interval is 1 second", tags = ConfigTag.CLIENT) private int readRetryInterval = 1; @@ -247,28 +247,49 @@ public enum ChecksumCombineMode { tags = ConfigTag.CLIENT) private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED"; + // ozone.client.hbase.enhancements.allowed + @Config(key = "hbase.enhancements.allowed", + defaultValue = "false", + description = "When set to false, client-side HBase enhancement-related Ozone (experimental) features " + + "are disabled (not allowed to be enabled) regardless of whether those configs are set.\n" + + "\n" + + "Here is the list of configs and values overridden when this config is set to false:\n" + + "1. ozone.fs.hsync.enabled = false\n" + + "2. ozone.client.incremental.chunk.list = false\n" + + "3. ozone.client.stream.putblock.piggybacking = false\n" + + "4. ozone.client.key.write.concurrency = 1\n" + + "\n" + + "A warning message will be printed if any of the above configs are overridden by this.", + tags = ConfigTag.CLIENT) + private boolean hbaseEnhancementsAllowed = false; + + // ozone.client.incremental.chunk.list @Config(key = "incremental.chunk.list", - defaultValue = "true", + defaultValue = "false", type = ConfigType.BOOLEAN, description = "Client PutBlock request can choose incremental chunk " + "list rather than full chunk list to optimize performance. " + - "Critical to HBase. EC does not support this feature.", + "Critical to HBase. EC does not support this feature. " + + "Can be enabled only when ozone.client.hbase.enhancements.allowed = true", tags = ConfigTag.CLIENT) - private boolean incrementalChunkList = true; + private boolean incrementalChunkList = false; + // ozone.client.stream.putblock.piggybacking @Config(key = "stream.putblock.piggybacking", - defaultValue = "true", + defaultValue = "false", type = ConfigType.BOOLEAN, - description = "Allow PutBlock to be piggybacked in WriteChunk " + - "requests if the chunk is small.", + description = "Allow PutBlock to be piggybacked in WriteChunk requests if the chunk is small. " + + "Can be enabled only when ozone.client.hbase.enhancements.allowed = true", tags = ConfigTag.CLIENT) - private boolean enablePutblockPiggybacking = true; + private boolean enablePutblockPiggybacking = false; + // ozone.client.key.write.concurrency @Config(key = "key.write.concurrency", defaultValue = "1", description = "Maximum concurrent writes allowed on each key. " + "Defaults to 1 which matches the behavior before HDDS-9844. " + - "For unlimited write concurrency, set this to -1 or any negative integer value.", + "For unlimited write concurrency, set this to -1 or any negative integer value. 
" + + "Any value other than 1 is effective only when ozone.client.hbase.enhancements.allowed = true", tags = ConfigTag.CLIENT) private int maxConcurrentWritePerKey = 1; @@ -298,6 +319,34 @@ public void validate() { OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; } + // Verify client configs related to HBase enhancements + // Enforce check on ozone.client.hbase.enhancements.allowed + if (!hbaseEnhancementsAllowed) { + // ozone.client.hbase.enhancements.allowed = false + if (incrementalChunkList) { + LOG.warn("Ignoring ozone.client.incremental.chunk.list = true " + + "because HBase enhancements are disallowed. " + + "To enable it, set ozone.client.hbase.enhancements.allowed = true."); + incrementalChunkList = false; + LOG.debug("Final ozone.client.incremental.chunk.list = {}", incrementalChunkList); + } + if (enablePutblockPiggybacking) { + LOG.warn("Ignoring ozone.client.stream.putblock.piggybacking = true " + + "because HBase enhancements are disallowed. " + + "To enable it, set ozone.client.hbase.enhancements.allowed = true."); + enablePutblockPiggybacking = false; + LOG.debug("Final ozone.client.stream.putblock.piggybacking = {}", enablePutblockPiggybacking); + } + if (maxConcurrentWritePerKey != 1) { + LOG.warn("Ignoring ozone.client.key.write.concurrency = {} " + + "because HBase enhancements are disallowed. " + + "To enable it, set ozone.client.hbase.enhancements.allowed = true.", + maxConcurrentWritePerKey); + maxConcurrentWritePerKey = 1; + LOG.debug("Final ozone.client.key.write.concurrency = {}", maxConcurrentWritePerKey); + } + // Note: ozone.fs.hsync.enabled is enforced by OzoneFSUtils#canEnableHsync, not here + } } public long getStreamBufferFlushSize() { @@ -486,6 +535,14 @@ public void setDatastreamPipelineMode(boolean datastreamPipelineMode) { this.datastreamPipelineMode = datastreamPipelineMode; } + public void setHBaseEnhancementsAllowed(boolean isHBaseEnhancementsEnabled) { + this.hbaseEnhancementsAllowed = isHBaseEnhancementsEnabled; + } + + public boolean getHBaseEnhancementsAllowed() { + return this.hbaseEnhancementsAllowed; + } + public void setIncrementalChunkList(boolean enable) { this.incrementalChunkList = enable; } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index c02306f8af8..bf49d408f7f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -167,7 +167,7 @@ private synchronized void connectToDatanode(DatanodeDetails dn) } // read port from the data node, on failure use default configured // port. 
- int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); + int port = dn.getStandalonePort().getValue(); if (port == 0) { port = config.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java index 07b70441721..52f31c9d129 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java @@ -185,8 +185,7 @@ private String getPipelineCacheKey(Pipeline pipeline, // Standalone port is chosen since all datanodes should have a // standalone port regardless of version and this port should not // have any collisions. - key += closestNode.getHostName() + closestNode.getPort( - DatanodeDetails.Port.Name.STANDALONE); + key += closestNode.getHostName() + closestNode.getStandalonePort(); } catch (IOException e) { LOG.error("Failed to get closest node to create pipeline cache key:" + e.getMessage()); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java index 704e886659a..50e010e85a2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; @@ -129,6 +130,7 @@ public void reset() { } public void unRegister() { + IOUtils.closeQuietly(containerOpsLatency.values()); MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index b0ef85cfbf7..979b1b99208 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -282,7 +282,7 @@ private CompletableFuture sendRequestAsync( // gets the minimum log index replicated to all servers @Override public long getReplicatedMinCommitIndex() { - return commitInfoMap.values().parallelStream() + return commitInfoMap.values().stream() .mapToLong(Long::longValue).min().orElse(0); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java index 61bc73420e6..7641de1274d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java @@ -124,7 +124,6 @@ XceiverClientReply watchOnLastIndex() throws IOException { * * @param commitIndex log index to watch for * @return minimum commit index replicated to all nodes - * @throws 
IOException IOException in case watch gets timed out */ CompletableFuture watchForCommitAsync(long commitIndex) { final MemoizedSupplier> supplier diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java index d5423d4ec0b..48c77f2c863 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java @@ -364,7 +364,6 @@ public void writeOnRetry(long len) throws IOException { * it is a no op. * @param bufferFull flag indicating whether bufferFull condition is hit or * its called as part flush/close - * @return minimum commit index replicated to all nodes * @throws IOException IOException in case watch gets timed out */ public void watchForCommit(boolean bufferFull) throws IOException { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index f792a678dad..d6353be9d22 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -166,6 +166,7 @@ public synchronized void initialize() throws IOException { if (blockInfo != null && blockInfo.isUnderConstruction()) { // use the block length from DN if block is under construction. length = blockData.getSize(); + LOG.debug("Updated block length to {} for block {}", length, blockID); } break; // If we get a StorageContainerException or an IOException due to @@ -274,16 +275,6 @@ protected BlockData getBlockDataUsingClient() throws IOException { blockID); } - DatanodeBlockID.Builder blkIDBuilder = - DatanodeBlockID.newBuilder().setContainerID(blockID.getContainerID()) - .setLocalID(blockID.getLocalID()) - .setBlockCommitSequenceId(blockID.getBlockCommitSequenceId()); - - int replicaIndex = pipeline.getReplicaIndex(pipeline.getClosestNode()); - if (replicaIndex > 0) { - blkIDBuilder.setReplicaIndex(replicaIndex); - } - GetBlockResponseProto response = ContainerProtocolCalls.getBlock( xceiverClient, VALIDATORS, blockID, tokenRef.get(), pipeline.getReplicaIndexes()); return response.getBlockData(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index e88b097c499..86bcfb3990e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -62,7 +62,9 @@ import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync; import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; +import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; +import org.apache.hadoop.util.Time; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -198,9 +200,7 @@ public BlockOutputStream( blkIDBuilder.build()).addMetadata(keyValue); this.pipeline = pipeline; // tell DataNode I will send incremental chunk list - // EC does 
not support incremental chunk list. - this.supportIncrementalChunkList = config.getIncrementalChunkList() && - this instanceof RatisBlockOutputStream && allDataNodesSupportPiggybacking(); + this.supportIncrementalChunkList = canEnableIncrementalChunkList(); LOG.debug("incrementalChunkList is {}", supportIncrementalChunkList); if (supportIncrementalChunkList) { this.containerBlockData.addMetadata(INCREMENTAL_CHUNK_LIST_KV); @@ -233,15 +233,54 @@ public BlockOutputStream( writtenDataLength = 0; failedServers = new ArrayList<>(0); ioException = new AtomicReference<>(null); - checksum = new Checksum(config.getChecksumType(), - config.getBytesPerChecksum()); + this.checksum = new Checksum(config.getChecksumType(), config.getBytesPerChecksum(), true); this.clientMetrics = clientMetrics; this.streamBufferArgs = streamBufferArgs; - this.allowPutBlockPiggybacking = config.getEnablePutblockPiggybacking() && - allDataNodesSupportPiggybacking(); + this.allowPutBlockPiggybacking = canEnablePutblockPiggybacking(); LOG.debug("PutBlock piggybacking is {}", allowPutBlockPiggybacking); } + /** + * Helper method to check if incremental chunk list can be enabled. + * Prints debug messages if it cannot be enabled. + */ + private boolean canEnableIncrementalChunkList() { + boolean confEnableIncrementalChunkList = config.getIncrementalChunkList(); + if (!confEnableIncrementalChunkList) { + return false; + } + + if (!(this instanceof RatisBlockOutputStream)) { + // Note: EC does not support incremental chunk list + LOG.debug("Unable to enable incrementalChunkList because BlockOutputStream is not a RatisBlockOutputStream"); + return false; + } + if (!allDataNodesSupportPiggybacking()) { + // Not all datanodes support piggybacking and incremental chunk list. + LOG.debug("Unable to enable incrementalChunkList because not all datanodes support piggybacking"); + return false; + } + return confEnableIncrementalChunkList; + } + + /** + * Helper method to check if PutBlock piggybacking can be enabled. + * Prints debug message if it cannot be enabled. + */ + private boolean canEnablePutblockPiggybacking() { + boolean confEnablePutblockPiggybacking = config.getEnablePutblockPiggybacking(); + if (!confEnablePutblockPiggybacking) { + return false; + } + + if (!allDataNodesSupportPiggybacking()) { + // Not all datanodes support piggybacking and incremental chunk list. + LOG.debug("Unable to enable PutBlock piggybacking because not all datanodes support piggybacking"); + return false; + } + return confEnablePutblockPiggybacking; + } + private boolean allDataNodesSupportPiggybacking() { // return true only if all DataNodes in the pipeline are on a version // that supports PutBlock piggybacking. 
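A hypothetical sketch of the "all replicas support it" check that gates piggybacking and incremental chunk lists. The version numbers, constant, and method shape are stand-ins for illustration, not Ozone's DatanodeDetails/version API.

```java
import java.util.List;

final class PiggybackingSupportCheck {
  // Hypothetical: the datanode software version that first understands
  // PutBlock piggybacking / incremental chunk lists.
  private static final int PIGGYBACKING_MIN_VERSION = 3;

  private PiggybackingSupportCheck() { }

  /** True only if every datanode in the pipeline is at or above the required version. */
  static boolean allDataNodesSupportPiggybacking(List<Integer> datanodeVersions) {
    return datanodeVersions.stream().allMatch(v -> v >= PIGGYBACKING_MIN_VERSION);
  }

  public static void main(String[] args) {
    System.out.println(allDataNodesSupportPiggybacking(List.of(3, 3, 3))); // true
    System.out.println(allDataNodesSupportPiggybacking(List.of(3, 2, 3))); // false -> feature stays off
  }
}
```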
@@ -322,6 +361,7 @@ public void write(int b) throws IOException { private void writeChunkIfNeeded() throws IOException { if (currentBufferRemaining == 0) { LOG.debug("WriteChunk from write(), buffer = {}", currentBuffer); + clientMetrics.getWriteChunksDuringWrite().incr(); writeChunk(currentBuffer); updateWriteChunkLength(); } @@ -366,6 +406,7 @@ private void doFlushOrWatchIfNeeded() throws IOException { updatePutBlockLength(); CompletableFuture putBlockFuture = executePutBlock(false, false); recordWatchForCommitAsync(putBlockFuture); + clientMetrics.getFlushesDuringWrite().incr(); } if (bufferPool.isAtCapacity()) { @@ -375,10 +416,8 @@ private void doFlushOrWatchIfNeeded() throws IOException { } private void recordWatchForCommitAsync(CompletableFuture putBlockResultFuture) { - recordFlushFuture(watchForCommitAsync(putBlockResultFuture)); - } + final CompletableFuture flushFuture = putBlockResultFuture.thenCompose(x -> watchForCommit(x.commitIndex)); - private void recordFlushFuture(CompletableFuture flushFuture) { Preconditions.checkState(Thread.holdsLock(this)); this.lastFlushFuture = flushFuture; this.allPendingFlushFutures = allPendingFlushFutures.thenCombine(flushFuture, (last, curr) -> null); @@ -444,7 +483,8 @@ public synchronized void writeOnRetry(long len) throws IOException { writeChunk(buffer); putBlockFuture = executePutBlock(false, false); } - CompletableFuture watchForCommitAsync = watchForCommitAsync(putBlockFuture); + CompletableFuture watchForCommitAsync = + putBlockFuture.thenCompose(x -> watchForCommit(x.commitIndex)); try { watchForCommitAsync.get(); } catch (InterruptedException e) { @@ -477,33 +517,48 @@ void releaseBuffersOnException() { } /** - * Watch for a specific commit index. + * Send a watch request to wait until the given index became committed. + * When watch is not needed (e.g. EC), this is a NOOP. + * + * @param index the log index to wait for. + * @return the future of the reply. */ - XceiverClientReply sendWatchForCommit(long commitIndex) - throws IOException { - return null; + CompletableFuture sendWatchForCommit(long index) { + return CompletableFuture.completedFuture(null); } - private void watchForCommit(long commitIndex) throws IOException { - checkOpen(); + private CompletableFuture watchForCommit(long commitIndex) { try { - LOG.debug("Entering watchForCommit commitIndex = {}", commitIndex); - final XceiverClientReply reply = sendWatchForCommit(commitIndex); - if (reply != null) { - List dnList = reply.getDatanodes(); - if (!dnList.isEmpty()) { - Pipeline pipe = xceiverClient.getPipeline(); - - LOG.warn("Failed to commit BlockId {} on {}. 
Failed nodes: {}", - blockID, pipe, dnList); - failedServers.addAll(dnList); - } - } - } catch (IOException ioe) { - setIoException(ioe); - throw getIoException(); + checkOpen(); + } catch (IOException e) { + throw new FlushRuntimeException(e); } - LOG.debug("Leaving watchForCommit commitIndex = {}", commitIndex); + + LOG.debug("Entering watchForCommit commitIndex = {}", commitIndex); + final long start = Time.monotonicNowNanos(); + return sendWatchForCommit(commitIndex) + .thenAccept(this::checkReply) + .exceptionally(e -> { + throw new FlushRuntimeException(setIoException(e)); + }) + .whenComplete((r, e) -> { + LOG.debug("Leaving watchForCommit commitIndex = {}", commitIndex); + clientMetrics.getHsyncWatchForCommitNs().add(Time.monotonicNowNanos() - start); + }); + } + + private void checkReply(XceiverClientReply reply) { + if (reply == null) { + return; + } + final List dnList = reply.getDatanodes(); + if (dnList.isEmpty()) { + return; + } + + LOG.warn("Failed to commit BlockId {} on {}. Failed nodes: {}", + blockID, xceiverClient.getPipeline(), dnList); + failedServers.addAll(dnList); } void updateCommitInfo(XceiverClientReply reply, List buffers) { @@ -531,6 +586,7 @@ CompletableFuture executePutBlock(boolean close, final CompletableFuture flushFuture; final XceiverClientReply asyncReply; try { + // Note: checksum was previously appended to containerBlockData by WriteChunk BlockData blockData = containerBlockData.build(); LOG.debug("sending PutBlock {} flushPos {}", blockData, flushPos); @@ -645,12 +701,15 @@ private void handleFlushInternal(boolean close) throws IOException, InterruptedException, ExecutionException { checkOpen(); LOG.debug("Start handleFlushInternal close={}", close); - CompletableFuture toWaitFor = handleFlushInternalSynchronized(close); + CompletableFuture toWaitFor = captureLatencyNs(clientMetrics.getHsyncSynchronizedWorkNs(), + () -> handleFlushInternalSynchronized(close)); if (toWaitFor != null) { LOG.debug("Waiting for flush"); try { + long startWaiting = Time.monotonicNowNanos(); toWaitFor.get(); + clientMetrics.getHsyncWaitForFlushNs().add(Time.monotonicNowNanos() - startWaiting); } catch (ExecutionException ex) { if (ex.getCause() instanceof FlushRuntimeException) { throw ((FlushRuntimeException) ex.getCause()).cause; @@ -679,6 +738,7 @@ public void waitForAllPendingFlushes() throws IOException { } private synchronized CompletableFuture handleFlushInternalSynchronized(boolean close) throws IOException { + long start = Time.monotonicNowNanos(); CompletableFuture putBlockResultFuture = null; // flush the last chunk data residing on the currentBuffer if (totalWriteChunkLength < writtenDataLength) { @@ -720,19 +780,10 @@ private synchronized CompletableFuture handleFlushInternalSynchronized(boo if (putBlockResultFuture != null) { recordWatchForCommitAsync(putBlockResultFuture); } + clientMetrics.getHsyncSendWriteChunkNs().add(Time.monotonicNowNanos() - start); return lastFlushFuture; } - private CompletableFuture watchForCommitAsync(CompletableFuture putBlockResultFuture) { - return putBlockResultFuture.thenAccept(x -> { - try { - watchForCommit(x.commitIndex); - } catch (IOException e) { - throw new FlushRuntimeException(e); - } - }); - } - @Override public void close() throws IOException { if (xceiverClientFactory != null && xceiverClient != null) { @@ -771,7 +822,7 @@ void validateResponse( } - public void setIoException(Exception e) { + public IOException setIoException(Throwable e) { IOException ioe = getIoException(); if (ioe == null) { IOException 
exception = new IOException(EXCEPTION_MSG + e.toString(), e); @@ -782,6 +833,7 @@ public void setIoException(Exception e) { "so subsequent request also encounters " + "Storage Container Exception {}", ioe, e); } + return getIoException(); } void cleanup() { @@ -802,6 +854,8 @@ public synchronized void cleanup(boolean invalidateClient) { if (lastChunkBuffer != null) { DIRECT_BUFFER_POOL.returnBuffer(lastChunkBuffer); lastChunkBuffer = null; + // Clear checksum cache + checksum.clearChecksumCache(); } } @@ -851,7 +905,10 @@ private CompletableFuture writeChunkToContainer( final long offset = chunkOffset.getAndAdd(effectiveChunkSize); final ByteString data = chunk.toByteString( bufferPool.byteStringConversion()); - ChecksumData checksumData = checksum.computeChecksum(chunk); + // chunk is incremental, don't cache its checksum + ChecksumData checksumData = checksum.computeChecksum(chunk, false); + // side note: checksum object is shared with PutBlock's (blockData) checksum calc, + // current impl does not support caching both ChunkInfo chunkInfo = ChunkInfo.newBuilder() .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex) .setOffset(offset) @@ -1001,6 +1058,7 @@ private void updateBlockDataForWriteChunk(ChunkBuffer chunk) lastChunkBuffer.capacity() - lastChunkBuffer.position(); appendLastChunkBuffer(chunk, 0, remainingBufferSize); updateBlockDataWithLastChunkBuffer(); + // TODO: Optional refactoring: Can attach ChecksumCache to lastChunkBuffer rather than Checksum appendLastChunkBuffer(chunk, remainingBufferSize, chunk.remaining() - remainingBufferSize); } @@ -1017,10 +1075,13 @@ private void updateBlockDataWithLastChunkBuffer() LOG.debug("lastChunkInfo = {}", lastChunkInfo); long lastChunkSize = lastChunkInfo.getLen(); addToBlockData(lastChunkInfo); - + // Set ByteBuffer limit to capacity, pos to 0. Does not erase data lastChunkBuffer.clear(); + if (lastChunkSize == config.getStreamBufferSize()) { lastChunkOffset += config.getStreamBufferSize(); + // Reached stream buffer size (chunk size), starting new chunk, need to clear checksum cache + checksum.clearChecksumCache(); } else { lastChunkBuffer.position((int) lastChunkSize); } @@ -1084,8 +1145,9 @@ private ChunkInfo createChunkInfo(long lastPartialChunkOffset) lastChunkBuffer.flip(); int revisedChunkSize = lastChunkBuffer.remaining(); // create the chunk info to be sent in PutBlock. 
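An illustrative, self-contained take on the checksum-cache idea referenced above: checksum slices of a growing buffer once, reuse them on later hsync-style flushes, recompute only the trailing partial slice, and clear the cache when a chunk boundary is reached. This is not Ozone's Checksum/ChecksumCache implementation; CRC32 and the class shape are assumptions made for the sketch.

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.CRC32;

final class CachingChecksumSketch {
  private final int bytesPerChecksum;
  private final List<Long> cached = new ArrayList<>(); // checksums of completed slices

  CachingChecksumSketch(int bytesPerChecksum) {
    this.bytesPerChecksum = bytesPerChecksum;
  }

  /** Compute per-slice checksums; assumes data.position() == 0 and a growing limit. */
  List<Long> compute(ByteBuffer data) {
    List<Long> result = new ArrayList<>(cached);        // reuse checksums of full slices
    int fullSlices = data.remaining() / bytesPerChecksum;
    for (int i = cached.size(); i < fullSlices; i++) {  // checksum newly completed slices
      long crc = crc(data, i * bytesPerChecksum, bytesPerChecksum);
      result.add(crc);
      cached.add(crc);
    }
    int tail = data.remaining() - fullSlices * bytesPerChecksum;
    if (tail > 0) {                                     // trailing partial slice: never cached
      result.add(crc(data, fullSlices * bytesPerChecksum, tail));
    }
    return result;
  }

  /** Drop cached values, e.g. when a new chunk starts. */
  void clear() {
    cached.clear();
  }

  private static long crc(ByteBuffer data, int off, int len) {
    CRC32 c = new CRC32();
    ByteBuffer slice = data.duplicate();
    slice.position(off);
    slice.limit(off + len);
    c.update(slice);
    return c.getValue();
  }
}
```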
- ChecksumData revisedChecksumData = - checksum.computeChecksum(lastChunkBuffer); + // checksum cache is utilized for this computation + // this checksum is stored in blockData and later transferred in PutBlock + ChecksumData revisedChecksumData = checksum.computeChecksum(lastChunkBuffer, true); long chunkID = lastPartialChunkOffset / config.getStreamBufferSize(); ChunkInfo.Builder revisedChunkInfo = ChunkInfo.newBuilder() diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java index 12ca9978c68..7776e245be0 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java @@ -38,9 +38,13 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.ArrayList; +import java.util.Comparator; import java.util.List; import java.util.Objects; +import java.util.Optional; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; @@ -142,8 +146,34 @@ ContainerCommandResponseProto> executePutBlock(boolean close, } if (checksumBlockData != null) { - List currentChunks = getContainerBlockData().getChunksList(); + + // For the same BlockGroupLength, we need to find the larger value of Block DataSize. + // This is because we do not send empty chunks to the DataNode, so the larger value is more accurate. + Map> maxDataSizeByGroup = Arrays.stream(blockData) + .filter(Objects::nonNull) + .collect(Collectors.groupingBy(BlockData::getBlockGroupLength, + Collectors.maxBy(Comparator.comparingLong(BlockData::getSize)))); + BlockData maxBlockData = maxDataSizeByGroup.get(blockGroupLength).get(); + + // When calculating the checksum size, + // We need to consider both blockGroupLength and the actual size of blockData. + // + // We use the smaller value to determine the size of the ChunkList. + // + // 1. In most cases, blockGroupLength is equal to the size of blockData. + // 2. Occasionally, blockData is not fully filled; if a chunk is empty, + // it is not sent to the DN, resulting in blockData size being smaller than blockGroupLength. + // 3. In cases with 'dirty data', + // if an error occurs when writing to the EC-Stripe (e.g., DN reports Container Closed), + // and the length confirmed with OM is smaller, blockGroupLength may be smaller than blockData size. 
+ long blockDataSize = Math.min(maxBlockData.getSize(), blockGroupLength); + int chunkSize = (int) Math.ceil(((double) blockDataSize / repConfig.getEcChunkSize())); List checksumBlockDataChunks = checksumBlockData.getChunks(); + if (chunkSize > 0) { + checksumBlockDataChunks = checksumBlockData.getChunks().subList(0, chunkSize); + } + + List currentChunks = getContainerBlockData().getChunksList(); Preconditions.checkArgument( currentChunks.size() == checksumBlockDataChunks.size(), @@ -269,7 +299,7 @@ public CompletableFuture executePutBlock(boolean close, throw ce; }); } catch (IOException | ExecutionException e) { - throw new IOException(EXCEPTION_MSG + e.toString(), e); + throw new IOException(EXCEPTION_MSG + e, e); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); handleInterruptedException(ex, false); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java index 3e78abbf485..e3f7f043a9e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java @@ -75,7 +75,6 @@ public synchronized int read(ByteBuffer byteBuffer) throws IOException { * readWithStrategy implementation, as it will never be called by the tests. * * @param strategy - * @return * @throws IOException */ protected abstract int readWithStrategy(ByteReaderStrategy strategy) throws diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java index 4bc144f3bd7..5f00e83e81b 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java @@ -34,7 +34,7 @@ public class MultipartInputStream extends ExtendedInputStream { private final String key; - private final long length; + private long length; // List of PartInputStream, one for each part of the key private final List partStreams; @@ -56,6 +56,8 @@ public class MultipartInputStream extends ExtendedInputStream { // can be reset if a new position is seeked. 
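A small worked example of the EC checksum chunk-count calculation introduced above, using made-up sizes.

```java
public class EcChunkCountExample {
  public static void main(String[] args) {
    // Hypothetical numbers, mirroring the ECBlockOutputStream calculation above:
    long blockGroupLength = 5L * 1024 * 1024;  // length confirmed with OM
    long maxBlockDataSize = 6L * 1024 * 1024;  // largest replica size reported for the group
    int ecChunkSize = 1024 * 1024;             // 1 MiB EC chunk size

    // Use the smaller of the two lengths, then round up to whole chunks.
    long blockDataSize = Math.min(maxBlockDataSize, blockGroupLength);
    int chunkCount = (int) Math.ceil((double) blockDataSize / ecChunkSize);

    System.out.println(chunkCount); // 5 -> only the first 5 checksum chunks are compared
  }
}
```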
private int prevPartIndex; + private boolean initialized = false; + public MultipartInputStream(String keyName, List inputStreams) { @@ -130,6 +132,9 @@ protected void checkPartBytesRead(int numBytesToRead, int numBytesRead, @Override public synchronized void seek(long pos) throws IOException { checkOpen(); + if (!initialized) { + initialize(); + } if (pos == 0 && length == 0) { // It is possible for length and pos to be zero in which case // seek should return instead of throwing exception @@ -173,6 +178,26 @@ public synchronized void seek(long pos) throws IOException { prevPartIndex = partIndex; } + public synchronized void initialize() throws IOException { + // Pre-check that the stream has not been intialized already + if (initialized) { + return; + } + + for (PartInputStream partInputStream : partStreams) { + if (partInputStream instanceof BlockInputStream) { + ((BlockInputStream) partInputStream).initialize(); + } + } + + long streamLength = 0L; + for (PartInputStream partInputStream : partStreams) { + streamLength += partInputStream.getLength(); + } + this.length = streamLength; + initialized = true; + } + @Override public synchronized long getPos() throws IOException { return length == 0 ? 0 : diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java index d32c37eba6c..0f95716bf9a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java @@ -102,8 +102,8 @@ void releaseBuffersOnException() { } @Override - XceiverClientReply sendWatchForCommit(long commitIndex) throws IOException { - return commitWatcher.watchForCommit(commitIndex); + CompletableFuture sendWatchForCommit(long index) { + return commitWatcher.watchForCommitAsync(index); } @Override diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java index d347dee8512..8287a5a78bb 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java @@ -43,7 +43,6 @@ public interface BlockInputStreamFactory { * @param blockInfo The blockInfo representing the block. * @param pipeline The pipeline to be used for reading the block * @param token The block Access Token - * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java index 8a87234a770..d9cadc948a6 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java @@ -71,7 +71,6 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool, * @param blockInfo The blockInfo representing the block. 
* @param pipeline The pipeline to be used for reading the block * @param token The block Access Token - * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index 6342de2c338..83abb937b03 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -152,7 +152,6 @@ protected int calculateExpectedDataBlocks(ECReplicationConfig rConfig) { * Using the current position, returns the index of the blockStream we should * be reading from. This is the index in the internal array holding the * stream reference. The block group index will be one greater than this. - * @return */ protected int currentStreamIndex() { return (int)((position / ecChunkSize) % repConfig.getData()); @@ -206,7 +205,6 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) throws IOE * to the replicaIndex given based on the EC pipeline fetched from SCM. * @param replicaIndex * @param refreshFunc - * @return */ protected Function ecPipelineRefreshFunction( int replicaIndex, Function refreshFunc) { @@ -241,7 +239,6 @@ protected Function ecPipelineRefreshFunction( * potentially partial last stripe. Note that the internal block index is * numbered starting from 1. * @param index - Index number of the internal block, starting from 1 - * @return */ protected long internalBlockLength(int index) { long lastStripe = blockInfo.getLength() % stripeSize; @@ -344,7 +341,6 @@ protected boolean shouldRetryFailedRead(int failedIndex) { * strategy buffer. This call may read from several internal BlockInputStreams * if there is sufficient space in the buffer. * @param strategy - * @return * @throws IOException */ @Override @@ -409,7 +405,6 @@ protected void seekStreamIfNecessary(BlockExtendedInputStream stream, * group length. * @param stream Stream to read from * @param strategy The ReaderStrategy to read data into - * @return * @throws IOException */ private int readFromStream(BlockExtendedInputStream stream, diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java index 66e7a31337a..aca3cfed465 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java @@ -45,7 +45,6 @@ public interface ECBlockInputStreamFactory { * know are bad and should not be used. * @param repConfig The replication Config * @param blockInfo The blockInfo representing the block. - * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. 
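As background for the ECBlockInputStream javadoc changes above: the stream to read from is derived from the read position, the EC chunk size and the data count, and the block group index is one greater than that stream index. The following standalone sketch (assuming an rs-3-2 layout with 1 MiB chunks; the values are illustrative, not from the patch) mirrors the arithmetic in currentStreamIndex():

    // Illustrative mapping from read position to internal EC data stream index,
    // assuming 3 data blocks and a 1 MiB EC chunk size.
    public class EcStreamIndexSketch {
      public static void main(String[] args) {
        long ecChunkSize = 1024L * 1024;
        int dataCount = 3;

        long[] positions = {0, ecChunkSize - 1, ecChunkSize, 5 * ecChunkSize + 10};
        for (long position : positions) {
          int streamIndex = (int) ((position / ecChunkSize) % dataCount);
          System.out.println("position " + position + " -> stream index " + streamIndex
              + ", block group index " + (streamIndex + 1));
        }
      }
    }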
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java index 01d0b0a7b7e..41c46aad379 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java @@ -68,7 +68,6 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory, * know are bad and should not be used. * @param repConfig The replication Config * @param blockInfo The blockInfo representing the block. - * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java index 31f94e0acad..229cc3f3e36 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java @@ -85,7 +85,7 @@ * Parity elements long. Missing or not needed elements should be set to null * in the array. The elements should be assigned to the array in EC index order. * - * Assuming we have n missing data locations, where n <= parity locations, the + * Assuming we have n missing data locations, where n {@literal <=} parity locations, the * ByteBuffers passed in from the client are either assigned to the decoder * input array, or they are assigned to the decoder output array, where * reconstructed data is written. The required number of parity buffers will be diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java index 0dd29cb50a4..920d1e19e1b 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java @@ -22,6 +22,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; class TestOzoneClientConfig { @@ -36,4 +38,42 @@ void missingSizeSuffix() { assertEquals(OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE, subject.getBytesPerChecksum()); } + + @Test + void testClientHBaseEnhancementsAllowedTrue() { + // When ozone.client.hbase.enhancements.allowed = true, + // related client configs should be effective as-is. 
+ OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); + + // Note: ozone.fs.hsync.enabled is checked by OzoneFSUtils.canEnableHsync(), thus not checked here + conf.setBoolean("ozone.client.incremental.chunk.list", true); + conf.setBoolean("ozone.client.stream.putblock.piggybacking", true); + conf.setInt("ozone.client.key.write.concurrency", -1); + + OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class); + + assertTrue(subject.getIncrementalChunkList()); + assertTrue(subject.getEnablePutblockPiggybacking()); + assertEquals(-1, subject.getMaxConcurrentWritePerKey()); + } + + @Test + void testClientHBaseEnhancementsAllowedFalse() { + // When ozone.client.hbase.enhancements.allowed = false, + // related client configs should be reverted back to default. + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", false); + + // Note: ozone.fs.hsync.enabled is checked by OzoneFSUtils.canEnableHsync(), thus not checked here + conf.setBoolean("ozone.client.incremental.chunk.list", true); + conf.setBoolean("ozone.client.stream.putblock.piggybacking", true); + conf.setInt("ozone.client.key.write.concurrency", -1); + + OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class); + + assertFalse(subject.getIncrementalChunkList()); + assertFalse(subject.getEnablePutblockPiggybacking()); + assertEquals(1, subject.getMaxConcurrentWritePerKey()); + } } diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 29cb513bb6f..f2576f7cf08 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-common - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Common Apache Ozone HDDS Common jar diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 87707f75dc4..4d630243e51 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -401,4 +401,7 @@ private HddsConfigKeys() { "hdds.datanode.slow.op.warning.threshold"; public static final String HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT = "500ms"; + + public static final String OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY = + "ozone.volume.io.percentiles.intervals.seconds"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index ff0cef43c9e..42aaa18a317 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds; +import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.ServiceException; import jakarta.annotation.Nonnull; @@ -651,7 +652,7 @@ public static File createDir(String dirPath) { * Utility string formatter method to display SCM roles. 
* * @param nodes - * @return + * @return String */ public static String format(List nodes) { StringBuilder sb = new StringBuilder(); @@ -681,25 +682,31 @@ public static int roundupMb(long bytes) { /** * Unwrap exception to check if it is some kind of access control problem - * ({@link AccessControlException} or {@link SecretManager.InvalidToken}) + * ({@link org.apache.hadoop.security.AccessControlException} or + * {@link org.apache.hadoop.security.token.SecretManager.InvalidToken}) * or a RpcException. */ public static Throwable getUnwrappedException(Exception ex) { + Throwable t = ex; if (ex instanceof ServiceException) { - Throwable t = ex.getCause(); - if (t instanceof RemoteException) { - t = ((RemoteException) t).unwrapRemoteException(); - } - while (t != null) { - if (t instanceof RpcException || - t instanceof AccessControlException || - t instanceof SecretManager.InvalidToken) { - return t; - } - t = t.getCause(); + t = ex.getCause(); + } + if (t instanceof RemoteException) { + t = ((RemoteException) t).unwrapRemoteException(); + } + while (t != null) { + if (t instanceof RpcException || + t instanceof AccessControlException || + t instanceof SecretManager.InvalidToken) { + break; } + Throwable cause = t.getCause(); + if (cause == null || cause instanceof RemoteException) { + break; + } + t = cause; } - return null; + return t; } /** @@ -719,7 +726,7 @@ public static boolean shouldNotFailoverOnRpcException(Throwable exception) { return true; } } - return false; + return exception instanceof InvalidProtocolBufferException; } /** @@ -878,4 +885,17 @@ public static HddsProtos.UUID toProtobuf(UUID uuid) { ? Thread.currentThread().getStackTrace() : null; } + + /** + * Logs a warning to report that the class is not closed properly. + */ + public static void reportLeak(Class clazz, String stackTrace, Logger log) { + String warning = String.format("%s is not closed properly", clazz.getSimpleName()); + if (stackTrace != null && log.isDebugEnabled()) { + String debugMessage = String.format("%nStackTrace for unclosed instance: %s", + stackTrace); + warning = warning.concat(debugMessage); + } + log.warn(warning); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java index 63c29ba7c91..804e6552488 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java @@ -30,12 +30,24 @@ public final class JavaUtils { * is equal or greater than the parameter. * * @param version 8, 9, 10 etc. - * @return comparison with system property, always true for 8 + * @return comparison with system property, always true for any int up to 8 */ public static boolean isJavaVersionAtLeast(int version) { return JAVA_SPEC_VER >= version; } + /** + * Query to see if major version of Java specification of the system + * is equal or less than the parameter. + * + * @param version 8, 9, 10 etc. + * @return comparison with system property + */ + public static boolean isJavaVersionAtMost(int version) { + return JAVA_SPEC_VER <= version; + } + + /** * Private constructor. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java index 4251344139a..6e9ee946790 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java @@ -27,9 +27,9 @@ * class or method not changing over time. Currently the stability can be * {@link Stable}, {@link Evolving} or {@link Unstable}.
 *
- * <ul><li>All classes that are annotated with {@link Public} or
- * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
- * <li>Classes that are {@link Private} are to be considered unstable unless
+ * <ul><li>All classes that are annotated with {@link InterfaceAudience.Public} or
+ * {@link InterfaceAudience.LimitedPrivate} must have InterfaceStability annotation. </li>
+ * <li>Classes that are {@link InterfaceAudience.Private} are to be considered unstable unless
 * a different InterfaceStability annotation states otherwise.</li>
 * <li>Incompatible changes must not be made to classes marked as stable.</li>
 * </ul>
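For context on the convention spelled out in the javadoc above, a typical declaration that combines the audience and stability annotations looks like the following; the class name is hypothetical and only illustrates the convention.

    import org.apache.hadoop.hdds.annotation.InterfaceAudience;
    import org.apache.hadoop.hdds.annotation.InterfaceStability;

    // Hypothetical class illustrating the annotation convention; not part of this change.
    @InterfaceAudience.Public
    @InterfaceStability.Evolving
    public class ExampleClientFacingApi {
      // Public audience plus Evolving stability: usable outside the project,
      // but the signature may still change between minor releases.
    }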
    diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java new file mode 100644 index 00000000000..d4fde1b75cb --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +import picocli.CommandLine; + +import java.util.ServiceLoader; + +/** + * Interface for parent commands that accept subcommands to be dynamically registered. + * Subcommands should: + *
+ * <ul>
+ *   <li>implement the interface returned by {@link #subcommandType()}</li>
+ *   <li>be annotated with {@code MetaInfServices} parameterized with the same type</li>
+ * </ul>
  • + */ +public interface ExtensibleParentCommand { + + /** @return The class of the marker interface for subcommands. */ + Class subcommandType(); + + /** Recursively find and add subcommands to {@code cli}. */ + static void addSubcommands(CommandLine cli) { + Object command = cli.getCommand(); + + // find and add subcommands + if (command instanceof ExtensibleParentCommand) { + ExtensibleParentCommand parentCommand = (ExtensibleParentCommand) command; + ServiceLoader subcommands = ServiceLoader.load(parentCommand.subcommandType()); + for (Object subcommand : subcommands) { + final CommandLine.Command commandAnnotation = subcommand.getClass().getAnnotation(CommandLine.Command.class); + CommandLine subcommandCommandLine = new CommandLine(subcommand, cli.getFactory()); + cli.addSubcommand(commandAnnotation.name(), subcommandCommandLine); + } + } + + // process subcommands recursively + for (CommandLine subcommand : cli.getSubcommands().values()) { + addSubcommands(subcommand); + } + } + +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index 4c5f3fdc872..14d454431f9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -16,18 +16,18 @@ */ package org.apache.hadoop.hdds.cli; +import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; -import java.util.ServiceLoader; import java.util.concurrent.Callable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine; -import picocli.CommandLine.Command; import picocli.CommandLine.ExitCode; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Option; @@ -50,32 +50,21 @@ public class GenericCli implements Callable, GenericParentCommand { private String configurationPath; private final CommandLine cmd; + private OzoneConfiguration conf; + private UserGroupInformation user; public GenericCli() { - cmd = new CommandLine(this); + this(CommandLine.defaultFactory()); + } + + public GenericCli(CommandLine.IFactory factory) { + cmd = new CommandLine(this, factory); cmd.setExecutionExceptionHandler((ex, commandLine, parseResult) -> { printError(ex); return EXECUTION_ERROR_EXIT_CODE; }); - } - public GenericCli(Class type) { - this(); - addSubcommands(getCmd(), type); - } - - private void addSubcommands(CommandLine cli, Class type) { - ServiceLoader registeredSubcommands = - ServiceLoader.load(SubcommandWithParent.class); - for (SubcommandWithParent subcommand : registeredSubcommands) { - if (subcommand.getParentType().equals(type)) { - final Command commandAnnotation = - subcommand.getClass().getAnnotation(Command.class); - CommandLine subcommandCommandLine = new CommandLine(subcommand); - addSubcommands(subcommandCommandLine, subcommand.getClass()); - cli.addSubcommand(commandAnnotation.name(), subcommandCommandLine); - } - } + ExtensibleParentCommand.addSubcommands(cmd); } /** @@ -130,6 +119,20 @@ public OzoneConfiguration createOzoneConfiguration() { return ozoneConf; } + public OzoneConfiguration getOzoneConf() { + if (conf == null) { + conf = createOzoneConfiguration(); + } + return conf; + } + + public UserGroupInformation getUser() throws IOException { + if (user == null) { + user = 
UserGroupInformation.getCurrentUser(); + } + return user; + } + @VisibleForTesting public picocli.CommandLine getCmd() { return cmd; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java index c176ad1464e..20755a6e0ec 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java @@ -97,7 +97,6 @@ public static JsonNode getBeansJsonNode(String metricsJson) throws IOException { * Returns the number of decommissioning nodes. * * @param jsonNode - * @return */ public static int getNumDecomNodes(JsonNode jsonNode) { int numDecomNodes; @@ -118,7 +117,6 @@ public static int getNumDecomNodes(JsonNode jsonNode) { * @param numDecomNodes * @param countsMap * @param errMsg - * @return * @throws IOException */ @Nullable diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java index a6dbd933ff1..9709029634c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java @@ -218,6 +218,11 @@ public String configFormat() { + "/" + data + "-" + parity + "-" + chunkKB(); } + @Override + public int getMinimumNodes() { + return data; + } + private String chunkKB() { return ecChunkSize / 1024 + "k"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java index 36d4d90e1af..9c42e3d59b1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java @@ -126,4 +126,9 @@ public String toString() { public String configFormat() { return toString(); } + + @Override + public int getMinimumNodes() { + return 1; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java index 7542409679b..d82cd08c08e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java @@ -234,4 +234,6 @@ static ReplicationConfig parseWithoutFallback(ReplicationType type, String configFormat(); + /** Minimum number of nodes, below this data loss happens. 
*/ + int getMinimumNodes(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java index 9ca2dfb538a..0b82ab8c872 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java @@ -128,4 +128,9 @@ public String toString() { public String configFormat() { return toString(); } + + @Override + public int getMinimumNodes() { + return replicationFactor.getNumber(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 01f508d257c..1c324ac8ff5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -19,8 +19,10 @@ package org.apache.hadoop.hdds.protocol; import java.util.ArrayList; +import java.util.Collections; import java.util.EnumSet; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.UUID; @@ -74,7 +76,8 @@ public class DatanodeDetails extends NodeImpl implements private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(ExtendedDatanodeDetailsProto.getDefaultInstance()), DatanodeDetails::getFromProtoBuf, - DatanodeDetails::getExtendedProtoBufMessage); + DatanodeDetails::getExtendedProtoBufMessage, + DatanodeDetails.class); public static Codec getCodec() { return CODEC; @@ -93,7 +96,6 @@ public static Codec getCodec() { private String version; private long setupTime; private String revision; - private String buildDate; private volatile HddsProtos.NodeOperationalState persistedOpState; private volatile long persistedOpStateExpiryEpochSec; private int initialVersion; @@ -111,7 +113,6 @@ private DatanodeDetails(Builder b) { version = b.version; setupTime = b.setupTime; revision = b.revision; - buildDate = b.buildDate; persistedOpState = b.persistedOpState; persistedOpStateExpiryEpochSec = b.persistedOpStateExpiryEpochSec; initialVersion = b.initialVersion; @@ -140,7 +141,6 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.version = datanodeDetails.version; this.setupTime = datanodeDetails.setupTime; this.revision = datanodeDetails.revision; - this.buildDate = datanodeDetails.buildDate; this.persistedOpState = datanodeDetails.getPersistedOpState(); this.persistedOpStateExpiryEpochSec = datanodeDetails.getPersistedOpStateExpiryEpochSec(); @@ -236,6 +236,18 @@ public synchronized void setPort(Name name, int port) { setPort(new Port(name, port)); } + public void setRatisPort(int port) { + setPort(Name.RATIS, port); + } + + public void setRestPort(int port) { + setPort(Name.REST, port); + } + + public void setStandalonePort(int port) { + setPort(Name.STANDALONE, port); + } + /** * Returns all the Ports used by DataNode. 
* @@ -326,20 +338,52 @@ public void setPersistedOpStateExpiryEpochSec(long expiry) { * @return Port */ public synchronized Port getPort(Port.Name name) { + Port ratisPort = null; for (Port port : ports) { if (port.getName().equals(name)) { return port; } + if (port.getName().equals(Name.RATIS)) { + ratisPort = port; + } } - // if no separate admin/server/datastream port, return single Ratis one for - // compat + // if no separate admin/server/datastream port, + // return single Ratis one for compatibility if (name == Name.RATIS_ADMIN || name == Name.RATIS_SERVER || name == Name.RATIS_DATASTREAM) { - return getPort(Name.RATIS); + return ratisPort; } return null; } + /** + * Helper method to get the Ratis port. + * + * @return Port + */ + public Port getRatisPort() { + return getPort(Name.RATIS); + } + + /** + * Helper method to get the REST port. + * + * @return Port + */ + public Port getRestPort() { + return getPort(Name.REST); + } + + /** + * Helper method to get the Standalone port. + * + * @return Port + */ + public Port getStandalonePort() { + return getPort(Name.STANDALONE); + } + + /** * Starts building a new DatanodeDetails from the protobuf input. * @@ -432,9 +476,6 @@ public static DatanodeDetails getFromProtoBuf( if (extendedDetailsProto.hasRevision()) { builder.setRevision(extendedDetailsProto.getRevision()); } - if (extendedDetailsProto.hasBuildDate()) { - builder.setBuildDate(extendedDetailsProto.getBuildDate()); - } return builder.build(); } @@ -448,11 +489,24 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { } public HddsProtos.DatanodeDetailsProto toProto(int clientVersion) { - return toProtoBuilder(clientVersion).build(); + return toProtoBuilder(clientVersion, Collections.emptySet()).build(); + } + + public HddsProtos.DatanodeDetailsProto toProto(int clientVersion, Set filterPorts) { + return toProtoBuilder(clientVersion, filterPorts).build(); } + /** + * Converts the current DatanodeDetails instance into a proto {@link HddsProtos.DatanodeDetailsProto.Builder} object. + * + * @param clientVersion - The client version. + * @param filterPorts - A set of {@link Port.Name} specifying ports to include. + * If empty, all available ports will be included. + * @return A {@link HddsProtos.DatanodeDetailsProto.Builder} Object. 
+ */ + public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( - int clientVersion) { + int clientVersion, Set filterPorts) { HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder() .setMostSigBits(uuid.getMostSignificantBits()) @@ -491,15 +545,25 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( final boolean handlesUnknownPorts = ClientVersion.fromProtoValue(clientVersion) .compareTo(VERSION_HANDLES_UNKNOWN_DN_PORTS) >= 0; + final int requestedPortCount = filterPorts.size(); + final boolean maySkip = requestedPortCount > 0; for (Port port : ports) { - if (handlesUnknownPorts || Name.V0_PORTS.contains(port.getName())) { + if (maySkip && !filterPorts.contains(port.getName())) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip adding {} port {} to proto message", + port.getName(), port.getValue()); + } + } else if (handlesUnknownPorts || Name.V0_PORTS.contains(port.getName())) { builder.addPorts(port.toProto()); } else { if (LOG.isDebugEnabled()) { LOG.debug("Skip adding {} port {} to proto message for client v{}", - port.getName(), port.getValue(), clientVersion); + port.getName(), port.getValue(), clientVersion); } } + if (maySkip && builder.getPortsCount() == requestedPortCount) { + break; + } } builder.setCurrentVersion(currentVersion); @@ -526,9 +590,6 @@ public ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() { if (!Strings.isNullOrEmpty(getRevision())) { extendedBuilder.setRevision(getRevision()); } - if (!Strings.isNullOrEmpty(getBuildDate())) { - extendedBuilder.setBuildDate(getBuildDate()); - } return extendedBuilder.build(); } @@ -587,6 +648,20 @@ public boolean equals(Object obj) { uuid.equals(((DatanodeDetails) obj).uuid); } + + /** + * Checks hostname, ipAddress and port of the 2 nodes are the same. + * @param datanodeDetails dnDetails object to compare with. + * @return true if the values match otherwise false. + */ + public boolean compareNodeValues(DatanodeDetails datanodeDetails) { + if (this == datanodeDetails || super.equals(datanodeDetails)) { + return true; + } + return Objects.equals(ipAddress, datanodeDetails.ipAddress) + && Objects.equals(hostName, datanodeDetails.hostName) && Objects.equals(ports, datanodeDetails.ports); + } + @Override public int hashCode() { return uuid.hashCode(); @@ -621,7 +696,6 @@ public static final class Builder { private String version; private long setupTime; private String revision; - private String buildDate; private HddsProtos.NodeOperationalState persistedOpState; private long persistedOpStateExpiryEpochSec = 0; private int initialVersion; @@ -653,7 +727,6 @@ public Builder setDatanodeDetails(DatanodeDetails details) { this.version = details.getVersion(); this.setupTime = details.getSetupTime(); this.revision = details.getRevision(); - this.buildDate = details.getBuildDate(); this.persistedOpState = details.getPersistedOpState(); this.persistedOpStateExpiryEpochSec = details.getPersistedOpStateExpiryEpochSec(); @@ -800,18 +873,6 @@ public Builder setRevision(String rev) { return this; } - /** - * Sets the DataNode build date. - * - * @param date the build date of DataNode. - * - * @return DatanodeDetails.Builder - */ - public Builder setBuildDate(String date) { - this.buildDate = date; - return this; - } - /** * Sets the DataNode setup time. * @@ -885,6 +946,36 @@ public static Port newPort(Port.Name name, Integer value) { return new Port(name, value); } + /** + * Constructs a new Ratis Port with the given port number. 
+ * + * @param portNumber Port number + * @return the {@link Port} instance + */ + public static Port newRatisPort(Integer portNumber) { + return newPort(Name.RATIS, portNumber); + } + + /** + * Constructs a new REST Port with the given port number. + * + * @param portNumber Port number + * @return the {@link Port} instance + */ + public static Port newRestPort(Integer portNumber) { + return newPort(Name.REST, portNumber); + } + + /** + * Constructs a new Standalone Port with the given port number. + * + * @param portNumber Port number + * @return the {@link Port} instance + */ + public static Port newStandalonePort(Integer portNumber) { + return newPort(Name.STANDALONE, portNumber); + } + /** * Container to hold DataNode Port details. */ @@ -908,6 +999,9 @@ public enum Name { Name.values()); public static final Set V0_PORTS = ImmutableSet.copyOf( EnumSet.of(STANDALONE, RATIS, REST)); + + public static final Set IO_PORTS = ImmutableSet.copyOf( + EnumSet.of(STANDALONE, RATIS, RATIS_DATASTREAM)); } private final Name name; @@ -1053,29 +1147,11 @@ public void setRevision(String rev) { this.revision = rev; } - /** - * Returns the DataNode build date. - * - * @return DataNode build date - */ - public String getBuildDate() { - return buildDate; - } - - /** - * Set DataNode build date. - * - * @param date DataNode build date - */ - public void setBuildDate(String date) { - this.buildDate = date; - } - @Override public HddsProtos.NetworkNode toProtobuf( int clientVersion) { return HddsProtos.NetworkNode.newBuilder() - .setDatanodeDetails(toProtoBuilder(clientVersion).build()) + .setDatanodeDetails(toProtoBuilder(clientVersion, Collections.emptySet()).build()) .build(); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java index 3ed9f4e58e1..eb6142ea67d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java @@ -20,11 +20,13 @@ /** * This class contains constants for Recon related configuration keys used in - * SCM & Datanode. + * SCM and Datanode. */ public final class ReconConfigKeys { /** + * This class contains constants for Recon related configuration keys used in + * SCM and Datanode. * Never constructed. */ private ReconConfigKeys() { @@ -71,7 +73,7 @@ private ReconConfigKeys() { * Recon administrator users delimited by a comma. * This is the list of users who can access admin only information from recon. * Users defined in - * {@link org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS} + * {@link org.apache.hadoop.ozone.OzoneConfigKeys#OZONE_ADMINISTRATORS} * will always be able to access all recon information regardless of this * setting. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java index 3ef9317ced0..dd78faf6827 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java @@ -206,8 +206,7 @@ public int getScmDefaultLayoutVersionOnInit() { * required for SCMSecurityProtocol where the KerberosInfo references * the old configuration with * the annotation shown below:- - * @KerberosInfo(serverPrincipal = ScmConfigKeys - * .HDDS_SCM_KERBEROS_PRINCIPAL_KEY) + * {@code @KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)} */ public static class ConfigStrings { public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 36d4dbd45a2..c4b42acec43 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -131,6 +131,11 @@ public final class ScmConfigKeys { "hdds.ratis.snapshot.threshold"; public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final String OZONE_SCM_CONTAINER_LIST_MAX_COUNT = + "ozone.scm.container.list.max.count"; + + public static final int OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; + // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; // 4 MB by default @@ -144,6 +149,10 @@ public final class ScmConfigKeys { "ozone.chunk.read.mapped.buffer.threshold"; public static final String OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT = "32KB"; + public static final String OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_KEY = + "ozone.chunk.read.mapped.buffer.max.count"; + // this max_count could not be greater than Linux platform max_map_count which by default is 65530. + public static final int OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT = 0; public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY = "ozone.scm.container.layout"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java index 19c39698dec..aeb894564b5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java @@ -30,6 +30,7 @@ public final class ScmInfo { private final String clusterId; private final String scmId; private final List peerRoles; + private final boolean scmRatisEnabled; /** * Builder for ScmInfo. @@ -38,6 +39,7 @@ public static class Builder { private String clusterId; private String scmId; private final List peerRoles; + private boolean scmRatisEnabled; public Builder() { peerRoles = new ArrayList<>(); @@ -73,15 +75,28 @@ public Builder setRatisPeerRoles(List roles) { return this; } + /** + * Set whether SCM enables Ratis. + * + * @param ratisEnabled If it is true, it means that the Ratis mode is turned on. + * If it is false, it means that the Ratis mode is not turned on. 
+ * @return Builder for scmInfo + */ + public Builder setScmRatisEnabled(boolean ratisEnabled) { + scmRatisEnabled = ratisEnabled; + return this; + } + public ScmInfo build() { - return new ScmInfo(clusterId, scmId, peerRoles); + return new ScmInfo(clusterId, scmId, peerRoles, scmRatisEnabled); } } - private ScmInfo(String clusterId, String scmId, List peerRoles) { + private ScmInfo(String clusterId, String scmId, List peerRoles, boolean ratisEnabled) { this.clusterId = clusterId; this.scmId = scmId; this.peerRoles = Collections.unmodifiableList(peerRoles); + this.scmRatisEnabled = ratisEnabled; } /** @@ -107,4 +122,8 @@ public String getScmId() { public List getRatisPeerRoles() { return peerRoles; } + + public boolean getScmRatisEnabled() { + return scmRatisEnabled; + } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java similarity index 100% rename from hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 648e2586ae2..91c0cbd50b4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -122,10 +123,11 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count) throws IOException; /** @@ -135,10 +137,11 @@ List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container of this state will be returned. * @param replicationConfig container replication Config. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, int count, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -392,11 +395,19 @@ StartContainerBalancerResponseProto startContainerBalancer( */ List getScmRatisRoles() throws IOException; + /** + * Get the current SCM mode. 
+ * + * @return `true` indicates that it is in RATIS mode, + * while `false` indicates that it is in STANDALONE mode. + * @throws IOException an I/O exception of some sort has occurred. + */ + boolean isScmRatisEnable() throws IOException; + /** * Force generates new secret keys (rotate). * * @param force boolean flag that forcefully rotates the key on demand - * @return * @throws IOException */ boolean rotateSecretKeys(boolean force) throws IOException; @@ -414,7 +425,7 @@ StartContainerBalancerResponseProto startContainerBalancer( * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if < 0. return all. + * @param count Maximum num of returned transactions, if {@literal < 0}. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. * @throws IOException diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java index 88522f2f9f4..90f690da5a1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java @@ -37,7 +37,7 @@ public final class ContainerID implements Comparable { private static final Codec CODEC = new DelegatedCodec<>( LongCodec.get(), ContainerID::valueOf, c -> c.id, - DelegatedCodec.CopyType.SHALLOW); + ContainerID.class, DelegatedCodec.CopyType.SHALLOW); public static final ContainerID MIN = ContainerID.valueOf(0); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index 6bf2d5500c8..90eb8b47de1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -46,7 +46,8 @@ public final class ContainerInfo implements Comparable { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(HddsProtos.ContainerInfoProto.getDefaultInstance()), ContainerInfo::fromProtobuf, - ContainerInfo::getProtobuf); + ContainerInfo::getProtobuf, + ContainerInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java new file mode 100644 index 00000000000..9e8d5738db8 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import java.util.List; + +/** + * Wrapper class for the result of listing containers with their total count. + */ +public class ContainerListResult { + private final List containerInfoList; + private final long totalCount; + + /** + * Constructs a new ContainerListResult. + * + * @param containerInfoList the list of containers + * @param totalCount the total number of containers + */ + public ContainerListResult(List containerInfoList, long totalCount) { + this.containerInfoList = containerInfoList; + this.totalCount = totalCount; + } + + /** + * Gets the list of containers. + * + * @return the list of containers + */ + public List getContainerInfoList() { + return containerInfoList; + } + + /** + * Gets the total count of containers. + * + * @return the total count of containers + */ + public long getTotalCount() { + return totalCount; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java index df8e9d45e13..45bc77d1d8f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java @@ -146,7 +146,6 @@ public long getReportTimeStamp() { /** * Return a map of all stats and their value as a long. - * @return */ public Map getStats() { Map result = new HashMap<>(); @@ -159,7 +158,6 @@ public Map getStats() { /** * Return a map of all samples, with the stat as the key and the samples * for the stat as a List of Long. 
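Returning to the ContainerListResult wrapper introduced above: callers that previously consumed a bare container list now also receive the total count. A hypothetical caller (the ScmClient instance and the count of 100 are assumed for illustration) might look like:

    import java.io.IOException;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.ContainerListResult;

    // Hypothetical helper showing how the capped listing result is consumed.
    public final class ListContainersExample {
      private ListContainersExample() { }

      static void printContainers(ScmClient scmClient) throws IOException {
        ContainerListResult result = scmClient.listContainer(0, 100);
        for (ContainerInfo info : result.getContainerInfoList()) {
          System.out.println("container " + info.getContainerID());
        }
        System.out.println(result.getContainerInfoList().size() + " of "
            + result.getTotalCount() + " containers returned by SCM");
      }
    }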
- * @return */ public Map> getSamples() { Map> result = new HashMap<>(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java index ac72dc94224..f4c9a5dbda9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java @@ -21,6 +21,7 @@ import java.util.Comparator; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -61,7 +62,7 @@ public HddsProtos.ContainerWithPipeline getProtobuf(int clientVersion) HddsProtos.ContainerWithPipeline.Builder builder = HddsProtos.ContainerWithPipeline.newBuilder(); builder.setContainerInfo(getContainerInfo().getProtobuf()) - .setPipeline(getPipeline().getProtobufMessage(clientVersion)); + .setPipeline(getPipeline().getProtobufMessage(clientVersion, Name.IO_PORTS)); return builder.build(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index af4e7299383..b71adb7099a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -54,7 +54,7 @@ import java.util.List; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.ozone.OzoneConsts.SCM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY; @@ -159,7 +159,7 @@ public static String getSCMRatisSnapshotDirectory(ConfigurationSource conf) { OZONE_METADATA_DIRS); File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); snapshotDir = - Paths.get(metaDirPath.getPath(), SCM_RATIS_SNAPSHOT_DIR).toString(); + Paths.get(metaDirPath.getPath(), OZONE_RATIS_SNAPSHOT_DIR).toString(); } return snapshotDir; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java index a5e443a598d..66fe7d18783 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java @@ -67,7 +67,6 @@ public class SCMNodeInfo { /** * Build SCM Node information from configuration. 
* @param conf - * @return */ public static List buildNodeInfo(ConfigurationSource conf) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index 332dddac25c..779f2456be6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -307,10 +307,13 @@ public void remove(Node node) { * @param loc string location of a node. If loc starts with "/", it's a * absolute path, otherwise a relative path. Following examples * are all accepted, + *
+   *            <pre>
+   *            {@code
    *            1.  /dc1/rm1/rack1          -> an inner node
    *            2.  /dc1/rm1/rack1/node1    -> a leaf node
    *            3.  rack1/node1             -> a relative path to this node
-   *
+   *            }
+   *            </pre>
    * @return null if the node is not found */ @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 1f3d0f02e6d..31e83f82d69 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -789,6 +789,9 @@ public List sortByDistanceCost(Node reader, List shuffledNodes = new ArrayList<>(nodes.subList(0, activeLen)); shuffleOperation.accept(shuffledNodes); + if (LOG.isDebugEnabled()) { + LOG.debug("Sorted datanodes {}, result: {}", nodes, shuffledNodes); + } return shuffledNodes; } // Sort weights for the nodes array @@ -815,6 +818,9 @@ public List sortByDistanceCost(Node reader, Preconditions.checkState(ret.size() == activeLen, "Wrong number of nodes sorted!"); + if (LOG.isDebugEnabled()) { + LOG.debug("Sorted datanodes {} for client {}, result: {}", nodes, reader, ret); + } return ret; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 1486f05f55c..7390de95fe9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -68,6 +68,7 @@ public final class Pipeline { Proto2Codec.get(HddsProtos.Pipeline.getDefaultInstance()), Pipeline::getFromProtobufSetCreationTimestamp, p -> p.getProtobufMessage(ClientVersion.CURRENT_VERSION), + Pipeline.class, DelegatedCodec.CopyType.UNSUPPORTED); public static Codec getCodec() { @@ -243,7 +244,6 @@ public int getReplicaIndex(DatanodeDetails dn) { /** * Get the replicaIndex Map. - * @return */ public Map getReplicaIndexes() { return this.getNodes().stream().collect(Collectors.toMap(Function.identity(), this::getReplicaIndex)); @@ -330,7 +330,11 @@ public List getNodesInOrder() { } void reportDatanode(DatanodeDetails dn) throws IOException { - if (nodeStatus.get(dn) == null) { + //This is a workaround for the case a datanode restarted with reinitializing it's dnId but it still reports the + // same set of pipelines it was part of. The pipeline report should be accepted for this anomalous condition. + // We rely on StaleNodeHandler in closing this pipeline eventually. 
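The workaround described in the comment above leans on the new DatanodeDetails#compareNodeValues helper (added earlier in this patch) to recognize a datanode that re-registered with a fresh UUID but kept its network identity. A rough standalone sketch of that matching idea, using a stand-in type instead of the real DatanodeDetails:

    import java.util.Objects;

    // SimpleNode is only a stand-in for DatanodeDetails to illustrate the idea:
    // ignore the UUID, match on IP, hostname and port.
    public class NodeIdentitySketch {
      static final class SimpleNode {
        final String uuid;
        final String ip;
        final String host;
        final int ratisPort;

        SimpleNode(String uuid, String ip, String host, int ratisPort) {
          this.uuid = uuid;
          this.ip = ip;
          this.host = host;
          this.ratisPort = ratisPort;
        }

        boolean sameNetworkIdentity(SimpleNode other) {
          return Objects.equals(ip, other.ip)
              && Objects.equals(host, other.host)
              && ratisPort == other.ratisPort;
        }
      }

      public static void main(String[] args) {
        SimpleNode before = new SimpleNode("uuid-1", "10.0.0.7", "dn1.example.com", 9858);
        SimpleNode restarted = new SimpleNode("uuid-2", "10.0.0.7", "dn1.example.com", 9858);

        System.out.println("same UUID? " + before.uuid.equals(restarted.uuid));                // false
        System.out.println("same network identity? " + before.sameNetworkIdentity(restarted)); // true
      }
    }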
+ if (dn == null || (nodeStatus.get(dn) == null + && nodeStatus.keySet().stream().noneMatch(node -> node.compareNodeValues(dn)))) { throw new IOException( String.format("Datanode=%s not part of pipeline=%s", dn, id)); } @@ -362,12 +366,17 @@ public ReplicationConfig getReplicationConfig() { public HddsProtos.Pipeline getProtobufMessage(int clientVersion) throws UnknownPipelineStateException { + return getProtobufMessage(clientVersion, Collections.emptySet()); + } + + public HddsProtos.Pipeline getProtobufMessage(int clientVersion, Set filterPorts) + throws UnknownPipelineStateException { List members = new ArrayList<>(); List memberReplicaIndexes = new ArrayList<>(); for (DatanodeDetails dn : nodeStatus.keySet()) { - members.add(dn.toProto(clientVersion)); + members.add(dn.toProto(clientVersion, filterPorts)); memberReplicaIndexes.add(replicaIndexes.getOrDefault(dn, 0)); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java index 5ca35456261..92e01735d53 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java @@ -34,7 +34,7 @@ public final class PipelineID { private static final Codec CODEC = new DelegatedCodec<>( UuidCodec.get(), PipelineID::valueOf, c -> c.id, - DelegatedCodec.CopyType.SHALLOW); + PipelineID.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index e91b50b4145..419623f3c06 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -146,10 +147,11 @@ List getExistContainerWithPipelinesInBatch( * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big) * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count) throws IOException; /** @@ -165,10 +167,11 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. 
* @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; /** @@ -184,14 +187,14 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param factor Container factor - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; - /** * Ask SCM for a list of containers with a range of container ID, state * and replication config, and the limit of count. @@ -205,10 +208,11 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param replicationConfig Replication config for the containers - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; @@ -337,7 +341,7 @@ Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if < 0. return all. + * @param count Maximum num of returned transactions, if {@literal < 0}. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. * @throws IOException diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java index 79db6985e76..e74bb1f621a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.security.x509.certificate.client; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.exception.OzoneSecurityException; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; import org.apache.hadoop.hdds.security.ssl.ReloadingX509TrustManager; @@ -128,23 +129,6 @@ X509Certificate getCertificate(String certSerialId) */ Set getAllCaCerts(); - /** - * Return the pem encoded CA certificate list. - *

    - * If initialized return list of pem encoded CA certificates, else return - * null. - * - * @return list of pem encoded CA certificates. - */ - List getCAList(); - - /** - * Update and returns the pem encoded CA certificate list. - * @return list of pem encoded CA certificates. - * @throws IOException - */ - List updateCAList() throws IOException; - /** * Verifies a digital Signature, given the signature and the certificate of * the signer. @@ -176,10 +160,32 @@ default void assertValidKeysAndCertificate() throws OzoneSecurityException { } } + /** + * Gets a KeyManager containing this CertificateClient's key material and trustchain. + * During certificate rotation this KeyManager is automatically updated with the new keys/certificates. + * + * @return A KeyManager containing keys and the trustchain for this CertificateClient. + * @throws CertificateException + */ ReloadingX509KeyManager getKeyManager() throws CertificateException; + /** + * Gets a TrustManager containing the trusted certificates of this CertificateClient. + * During certificate rotation this TrustManager is automatically updated with the new certificates. + * + * @return A TrustManager containing trusted certificates for this CertificateClient. + * @throws CertificateException + */ ReloadingX509TrustManager getTrustManager() throws CertificateException; + /** + * Creates a ClientTrustManager instance using the trusted certificates of this certificate client. + * + * @return The new ClientTrustManager instance. + * @throws IOException + */ + ClientTrustManager createClientTrustManager() throws IOException; + /** * Register a receiver that will be called after the certificate renewed. * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java index 1f04e868a85..553b1dc812e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java @@ -27,13 +27,12 @@ import java.util.List; import java.util.Optional; +import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.commons.validator.routines.DomainValidator; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; - -import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.bouncycastle.asn1.ASN1EncodableVector; import org.bouncycastle.asn1.ASN1Object; @@ -390,7 +389,7 @@ private Optional getSubjectAltNameExtension() throws if (altNames != null) { return Optional.of(new Extension(Extension.subjectAlternativeName, false, new DEROctetString(new GeneralNames( - altNames.toArray(new GeneralName[altNames.size()]))))); + altNames.toArray(new GeneralName[0]))))); } return Optional.empty(); } @@ -414,12 +413,10 @@ private Extensions createExtensions() throws IOException { // Add subject alternate name extension Optional san = getSubjectAltNameExtension(); - if (san.isPresent()) { - extensions.add(san.get()); - } + san.ifPresent(extensions::add); return new Extensions( - extensions.toArray(new Extension[extensions.size()])); + extensions.toArray(new Extension[0])); } public 
CertificateSignRequest build() throws SCMSecurityException { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index 31aaca568e4..66685b4bbbd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -131,7 +131,7 @@ public static InetSocketAddress updateListenAddress(OzoneConfiguration conf, * Fall back to OZONE_METADATA_DIRS if not defined. * * @param conf - * @return + * @return File */ public static File getScmDbDir(ConfigurationSource conf) { File metadataDir = getDirectoryFromConfig(conf, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java index b76a316c90b..386b1358b97 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java @@ -44,8 +44,6 @@ public static void main(String[] args) { System.out.println( "Source code repository " + HDDS_VERSION_INFO.getUrl() + " -r " + HDDS_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + HDDS_VERSION_INFO.getUser() + " on " - + HDDS_VERSION_INFO.getDate()); System.out.println( "Compiled with protoc " + HDDS_VERSION_INFO.getHadoopProtoc2Version() + ", " + HDDS_VERSION_INFO.getGrpcProtocVersion() + diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java index 477a291f928..9579d4e73bf 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java @@ -31,7 +31,6 @@ /** * Simple general resource leak detector using {@link ReferenceQueue} and {@link java.lang.ref.WeakReference} to * observe resource object life-cycle and assert proper resource closure before they are GCed. - * *

    * Example usage: * @@ -43,16 +42,18 @@ * // report leaks, don't refer to the original object (MyResource) here. * System.out.println("MyResource is not closed before being discarded."); * }); - * - * @Override + * } + * } + * + *

    + *   {@code @Override
      *   public void close() {
      *     // proper resources cleanup...
      *     // inform tracker that this object is closed properly.
      *     leakTracker.close();
      *   }
    - * }
    - *
    - * }
    + * } + * */ public class LeakDetector { private static final Logger LOG = LoggerFactory.getLogger(LeakDetector.class); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java index 349c0a86206..d3de20cd476 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java @@ -65,18 +65,6 @@ public String getRevision() { return info.getProperty("revision", "Unknown"); } - public String getBranch() { - return info.getProperty("branch", "Unknown"); - } - - public String getDate() { - return info.getProperty("date", "Unknown"); - } - - public String getUser() { - return info.getProperty("user", "Unknown"); - } - public String getUrl() { return info.getProperty("url", "Unknown"); } @@ -108,7 +96,6 @@ public String getCompilePlatform() { public String getBuildVersion() { return getVersion() + " from " + getRevision() + - " by " + getUser() + " source checksum " + getSrcChecksum(); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java index 2ec396c0ffa..6d416ea2ef3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java @@ -36,6 +36,11 @@ private BooleanCodec() { // singleton } + @Override + public Class getTypeClass() { + return Boolean.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java index 46779648e67..54bbf42c468 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java @@ -30,6 +30,9 @@ public interface Codec { byte[] EMPTY_BYTE_ARRAY = {}; + /** @return the class of the {@link T}. */ + Class getTypeClass(); + /** * Does this {@link Codec} support the {@link CodecBuffer} methods? * If this method returns true, this class must implement both diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java index 1ac293b301b..87be912bb53 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java @@ -58,9 +58,9 @@ public class CodecBuffer implements UncheckedAutoCloseable { private static class Factory { private static volatile BiFunction constructor = CodecBuffer::new; - static void set(BiFunction f) { + static void set(BiFunction f, String name) { constructor = f; - LOG.info("Successfully set constructor to " + f); + LOG.info("Successfully set constructor to {}: {}", name, f); } static CodecBuffer newCodecBuffer(ByteBuf buf) { @@ -89,7 +89,7 @@ protected void finalize() { * Note that there is a severe performance penalty for leak detection. */ public static void enableLeakDetection() { - Factory.set(LeakDetector::newCodecBuffer); + Factory.set(LeakDetector::newCodecBuffer, "LeakDetector::newCodecBuffer"); } /** The size of a buffer. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java index dff0b015ed5..2ed92e66d2e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java @@ -23,9 +23,9 @@ import java.io.IOException; /** - * A {@link Codec} to serialize/deserialize objects by delegation. + * A {@link org.apache.hadoop.hdds.utils.db.Codec} to serialize/deserialize objects by delegation. * - * @param The object type of this {@link Codec}. + * @param The object type of this {@link org.apache.hadoop.hdds.utils.db.Codec}. * @param The object type of the {@link #delegate}. */ public class DelegatedCodec implements Codec { @@ -47,31 +47,39 @@ public enum CopyType { private final Codec delegate; private final CheckedFunction forward; private final CheckedFunction backward; + private final Class clazz; private final CopyType copyType; /** * Construct a {@link Codec} using the given delegate. * * @param delegate the delegate {@link Codec} - * @param forward a function to convert {@link DELEGATE} to {@link T}. - * @param backward a function to convert {@link T} back to {@link DELEGATE}. + * @param forward a function to convert {@code DELEGATE} to {@code T}. + * @param backward a function to convert {@code T} back to {@code DELEGATE}. * @param copyType How to {@link #copyObject(Object)}? */ public DelegatedCodec(Codec delegate, CheckedFunction forward, CheckedFunction backward, - CopyType copyType) { + Class clazz, CopyType copyType) { this.delegate = delegate; this.forward = forward; this.backward = backward; + this.clazz = clazz; this.copyType = copyType; } /** The same as new DelegatedCodec(delegate, forward, backward, DEEP). 
*/ public DelegatedCodec(Codec delegate, CheckedFunction forward, - CheckedFunction backward) { - this(delegate, forward, backward, CopyType.DEEP); + CheckedFunction backward, + Class clazz) { + this(delegate, forward, backward, clazz, CopyType.DEEP); + } + + @Override + public Class getTypeClass() { + return clazz; } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java index 50488053159..d31be6fe976 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java @@ -36,6 +36,11 @@ private IntegerCodec() { // singleton } + @Override + public Class getTypeClass() { + return Integer.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java index 9e776cc18f7..cf481980008 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java @@ -33,6 +33,11 @@ public static LongCodec get() { private LongCodec() { } + @Override + public Class getTypeClass() { + return Long.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java index 96d12d1ebe5..8eb4a307215 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java @@ -47,12 +47,19 @@ public static Codec get(T t) { return (Codec) codec; } + private final Class clazz; private final Parser parser; private Proto2Codec(M m) { + this.clazz = (Class) m.getClass(); this.parser = (Parser) m.getParserForType(); } + @Override + public Class getTypeClass() { + return clazz; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java index 30245e033e0..c1eb693a007 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java @@ -47,12 +47,19 @@ public static Codec get(T t) { return (Codec) codec; } + private final Class clazz; private final Parser parser; private Proto3Codec(M m) { + this.clazz = (Class) m.getClass(); this.parser = (Parser) m.getParserForType(); } + @Override + public Class getTypeClass() { + return clazz; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java index f6482e5712c..beb296a29d1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java @@ -37,6 +37,11 @@ private ShortCodec() { // singleton } + @Override + public Class getTypeClass() { + return Short.class; + 
} + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java index 1df55237937..e35be632dc4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java @@ -59,6 +59,11 @@ abstract class StringCodecBase implements Codec { this.fixedLength = max == encoder.averageBytesPerChar(); } + @Override + public final Class getTypeClass() { + return String.class; + } + CharsetEncoder newEncoder() { return charset.newEncoder() .onMalformedInput(CodingErrorAction.REPORT) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java index dfccaa0ab75..d05b748b52a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java @@ -40,6 +40,11 @@ public static int getSerializedSize() { private UuidCodec() { } + @Override + public Class getTypeClass() { + return UUID.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java index f3bd1a96b66..cc6695dc7d6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java @@ -42,10 +42,6 @@ public enum ClientVersion implements ComponentVersion { "This client version has support for Object Store and File " + "System Optimized Bucket Layouts."), - EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST(4, - "This client version enforces replica index is set for fixing read corruption that could occur when " + - "replicaIndex parameter is not validated before EC block reads."), - FUTURE_VERSION(-1, "Used internally when the server side is older and an" + " unknown client version has arrived from the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index c61502ff4a8..4c0df91e1a6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -120,6 +120,14 @@ public final class OzoneConfigKeys { public static final String OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT = "4MB"; + /** + * Flag to allow server-side HBase-related features and enhancements to be enabled. + */ + public static final String OZONE_HBASE_ENHANCEMENTS_ALLOWED + = "ozone.hbase.enhancements.allowed"; + public static final boolean OZONE_HBASE_ENHANCEMENTS_ALLOWED_DEFAULT + = false; + /** * Flag to enable hsync/hflush. */ @@ -193,9 +201,6 @@ public final class OzoneConfigKeys { "ozone.client.ec.grpc.write.timeout"; public static final String OZONE_CLIENT_EC_GRPC_WRITE_TIMEOUT_DEFAULT = "30s"; - public static final String OZONE_EC_GRPC_ZERO_COPY_ENABLED = - "ozone.ec.grpc.zerocopy.enabled"; - public static final boolean OZONE_EC_GRPC_ZERO_COPY_ENABLED_DEFAULT = true; /** * Ozone administrator users delimited by comma. 
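Stepping back from the individual hunks: the getTypeClass() override being added to each codec above is what allows codecs to be looked up by the type they handle. A simplified sketch of that pattern (SimpleCodecRegistry and its nested Codec interface are illustrative stand-ins, not the project's actual classes):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    final class SimpleCodecRegistry {
      // Minimal stand-in for the Codec interface touched by this patch.
      interface Codec<T> {
        Class<T> getTypeClass();
        byte[] toPersistedFormat(T value) throws IOException;
      }

      private final Map<Class<?>, Codec<?>> codecs = new HashMap<>();

      // getTypeClass() lets each codec register itself without a separate class argument.
      <T> void register(Codec<T> codec) {
        codecs.put(codec.getTypeClass(), codec);
      }

      @SuppressWarnings("unchecked")
      <T> Codec<T> lookup(Class<T> clazz) {
        final Codec<T> codec = (Codec<T>) codecs.get(clazz);
        if (codec == null) {
          throw new IllegalStateException("No codec registered for " + clazz);
        }
        return codec;
      }
    }

Keying the map by getTypeClass() also makes a mismatched registration fail fast instead of surfacing later as a ClassCastException during deserialization.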
@@ -535,10 +540,6 @@ public final class OzoneConfigKeys { public static final int OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT = 512; - public static final String OZONE_CLIENT_LIST_TRASH_KEYS_MAX = - "ozone.client.list.trash.keys.max"; - public static final int OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT = 1000; - public static final String OZONE_HTTP_BASEDIR = "ozone.http.basedir"; public static final String OZONE_HTTP_POLICY_KEY = @@ -567,11 +568,6 @@ public final class OzoneConfigKeys { "ozone.https.client.need-auth"; public static final boolean OZONE_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false; - public static final String OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY = - "ozone.om.keyname.character.check.enabled"; - public static final boolean OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT = - false; - public static final int OZONE_INIT_DEFAULT_LAYOUT_VERSION_DEFAULT = -1; public static final String OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY = "ozone.client.key.provider.cache.expiry"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index b34a5d8387b..49bfa1eae21 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -40,7 +40,6 @@ public final class OzoneConsts { public static final String SCM_CERT_SERIAL_ID = "scmCertSerialId"; public static final String PRIMARY_SCM_NODE_ID = "primaryScmNodeId"; - public static final String OZONE_SIMPLE_ROOT_USER = "root"; public static final String OZONE_SIMPLE_HDFS_USER = "hdfs"; public static final String STORAGE_ID = "storageID"; @@ -76,12 +75,6 @@ public final class OzoneConsts { "EEE, dd MMM yyyy HH:mm:ss zzz"; public static final String OZONE_TIME_ZONE = "GMT"; - public static final String OZONE_COMPONENT = "component"; - public static final String OZONE_FUNCTION = "function"; - public static final String OZONE_RESOURCE = "resource"; - public static final String OZONE_USER = "user"; - public static final String OZONE_REQUEST = "request"; - // OM Http server endpoints public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT = "/serviceList"; @@ -101,14 +94,9 @@ public final class OzoneConsts { public static final String CONTAINER_EXTENSION = ".container"; - public static final String CONTAINER_META = ".meta"; - - // Refer to {@link ContainerReader} for container storage layout on disk. - public static final String CONTAINER_PREFIX = "containers"; public static final String CONTAINER_META_PATH = "metadata"; public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp"; public static final String CONTAINER_CHUNK_NAME_DELIMITER = "."; - public static final String CONTAINER_ROOT_PREFIX = "repository"; public static final String FILE_HASH = "SHA-256"; public static final String MD5_HASH = "MD5"; @@ -128,13 +116,13 @@ public final class OzoneConsts { * level DB names used by SCM and data nodes. 
*/ public static final String CONTAINER_DB_SUFFIX = "container.db"; - public static final String PIPELINE_DB_SUFFIX = "pipeline.db"; public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX; public static final String OM_DB_NAME = "om.db"; public static final String SCM_DB_NAME = "scm.db"; public static final String OM_DB_BACKUP_PREFIX = "om.db.backup."; public static final String SCM_DB_BACKUP_PREFIX = "scm.db.backup."; public static final String CONTAINER_DB_NAME = "container.db"; + public static final String WITNESSED_CONTAINER_DB_NAME = "witnessed_container.db"; public static final String STORAGE_DIR_CHUNKS = "chunks"; public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH = @@ -187,10 +175,8 @@ public final class OzoneConsts { public static final String OM_USER_PREFIX = "$"; public static final String OM_S3_PREFIX = "S3:"; public static final String OM_S3_CALLER_CONTEXT_PREFIX = "S3Auth:S3G|"; - public static final String OM_S3_VOLUME_PREFIX = "s3"; public static final String OM_S3_SECRET = "S3Secret:"; public static final String OM_PREFIX = "Prefix:"; - public static final String OM_TENANT = "Tenant:"; /** * Max chunk size limit. @@ -198,11 +184,6 @@ public final class OzoneConsts { public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024; - /** - * Max OM Quota size of Long.MAX_VALUE. - */ - public static final long MAX_QUOTA_IN_BYTES = Long.MAX_VALUE; - /** * Quota RESET default is -1, which means quota is not set. */ @@ -214,36 +195,20 @@ public final class OzoneConsts { */ public enum Units { TB, GB, MB, KB, B } - /** - * Max number of keys returned per list buckets operation. - */ - public static final int MAX_LISTBUCKETS_SIZE = 1024; - - /** - * Max number of keys returned per list keys operation. - */ - public static final int MAX_LISTKEYS_SIZE = 1024; - - /** - * Max number of volumes returned per list volumes operation. - */ - public static final int MAX_LISTVOLUMES_SIZE = 1024; - - public static final int INVALID_PORT = -1; - /** * Object ID to identify reclaimable uncommitted blocks. */ public static final long OBJECT_ID_RECLAIM_BLOCKS = 0L; - /** * Default SCM Datanode ID file name. */ public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id"; - // The ServiceListJSONServlet context attribute where OzoneManager - // instance gets stored. + /** + * The ServiceListJSONServlet context attribute where OzoneManager + * instance gets stored. 
+ */ public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om"; public static final String SCM_CONTEXT_ATTRIBUTE = "ozone.scm"; @@ -308,12 +273,8 @@ private OzoneConsts() { public static final String KEY_PREFIX = "keyPrefix"; public static final String ACL = "acl"; public static final String ACLS = "acls"; - public static final String USER_ACL = "userAcl"; - public static final String ADD_ACLS = "addAcls"; - public static final String REMOVE_ACLS = "removeAcls"; public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets"; public static final String HAS_SNAPSHOT = "hasSnapshot"; - public static final String TO_KEY_NAME = "toKeyName"; public static final String STORAGE_TYPE = "storageType"; public static final String RESOURCE_TYPE = "resourceType"; public static final String IS_VERSION_ENABLED = "isVersionEnabled"; @@ -323,7 +284,6 @@ private OzoneConsts() { public static final String REPLICATION_TYPE = "replicationType"; public static final String REPLICATION_FACTOR = "replicationFactor"; public static final String REPLICATION_CONFIG = "replicationConfig"; - public static final String KEY_LOCATION_INFO = "keyLocationInfo"; public static final String MULTIPART_LIST = "multipartList"; public static final String UPLOAD_ID = "uploadID"; public static final String PART_NUMBER_MARKER = "partNumberMarker"; @@ -378,10 +338,6 @@ private OzoneConsts() { public static final String JAVA_TMP_DIR = "java.io.tmpdir"; public static final String LOCALHOST = "localhost"; - - public static final int S3_BUCKET_MIN_LENGTH = 3; - public static final int S3_BUCKET_MAX_LENGTH = 64; - public static final int S3_SECRET_KEY_MIN_LENGTH = 8; public static final int S3_REQUEST_HEADER_METADATA_SIZE_LIMIT_KB = 2; @@ -398,7 +354,6 @@ private OzoneConsts() { public static final String GDPR_ALGORITHM_NAME = "AES"; public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16; public static final Charset GDPR_CHARSET = StandardCharsets.UTF_8; - public static final String GDPR_LENGTH = "length"; public static final String GDPR_SECRET = "secret"; public static final String GDPR_ALGORITHM = "algorithm"; @@ -409,7 +364,7 @@ private OzoneConsts() { * contains illegal characters when creating/renaming key. * * Avoid the following characters in a key name: - * "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]", Quotation + * {@literal "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]"}, Quotation * marks and Non-printable ASCII characters (128–255 decimal characters). * https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html */ @@ -426,13 +381,6 @@ private OzoneConsts() { public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB"; - // SCM HA - public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault"; - - // SCM Ratis snapshot file to store the last applied index - public static final String SCM_RATIS_SNAPSHOT_INDEX = "scmRatisSnapshotIndex"; - - public static final String SCM_RATIS_SNAPSHOT_TERM = "scmRatisSnapshotTerm"; // An on-disk transient marker file used when replacing DB with checkpoint public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker"; @@ -440,10 +388,7 @@ private OzoneConsts() { // should remain prepared even after a restart. 
public static final String PREPARE_MARKER = "prepareMarker"; - // TODO : rename this to OZONE_RATIS_SNAPSHOT_DIR and use it in both - // SCM and OM - public static final String OM_RATIS_SNAPSHOT_DIR = "snapshot"; - public static final String SCM_RATIS_SNAPSHOT_DIR = "snapshot"; + public static final String OZONE_RATIS_SNAPSHOT_DIR = "snapshot"; public static final long DEFAULT_OM_UPDATE_ID = -1L; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index eec2ceeb5e8..2d0b2bb56fd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -44,6 +44,11 @@ public enum OzoneManagerVersion implements ComponentVersion { ATOMIC_REWRITE_KEY(6, "OzoneManager version that supports rewriting key as atomic operation"), HBASE_SUPPORT(7, "OzoneManager version that supports HBase integration"), + LIGHTWEIGHT_LIST_STATUS(8, "OzoneManager version that supports lightweight" + + " listStatus API."), + + S3_OBJECT_TAGGING_API(9, "OzoneManager version that supports S3 object tagging APIs, such as " + + "PutObjectTagging, GetObjectTagging, and DeleteObjectTagging"), FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java index f8b3febfeca..03771915be4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java @@ -33,6 +33,8 @@ import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Class to compute and verify checksums for chunks. @@ -40,6 +42,8 @@ * This class is not thread safe. */ public class Checksum { + public static final Logger LOG = LoggerFactory.getLogger(Checksum.class); + private static Function newMessageDigestFunction( String algorithm) { final MessageDigest md; @@ -63,7 +67,7 @@ public static ByteString int2ByteString(int n) { private static Function newChecksumByteBufferFunction( Supplier constructor) { final ChecksumByteBuffer algorithm = constructor.get(); - return data -> { + return data -> { algorithm.reset(); algorithm.update(data); return int2ByteString((int)algorithm.getValue()); @@ -97,6 +101,23 @@ Function newChecksumFunction() { private final ChecksumType checksumType; private final int bytesPerChecksum; + /** + * Caches computeChecksum() result when requested. + * This must be manually cleared when a new block chunk has been started. + */ + private final ChecksumCache checksumCache; + + /** + * BlockOutputStream needs to call this method to clear the checksum cache + * whenever a block chunk has been established. + */ + public boolean clearChecksumCache() { + if (checksumCache != null) { + checksumCache.clear(); + return true; + } + return false; + } /** * Constructs a Checksum object. 
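The hunk that follows adds a cache-enabled Checksum constructor, a computeChecksum(data, useCache) overload, and clearChecksumCache(). A hedged usage sketch of the intended call pattern, assumed to live in the same org.apache.hadoop.ozone.common package so no imports are needed; only those three members come from the patch, while the class and method names below are illustrative:

    // Sketch: a writer-style caller re-checksumming a chunk buffer that only
    // grows between calls, so cached per-slice checksums can be reused.
    final class CachedChecksumSketch {

      // With useCache = true, only the trailing partial checksum is recomputed;
      // the earlier bytesPerChecksum-sized slices come straight from the cache.
      static ChecksumData checksumGrowingChunk(Checksum cacheEnabledChecksum, ChunkBuffer chunk)
          throws OzoneChecksumException {
        return cacheEnabledChecksum.computeChecksum(chunk, true);
      }

      // Must be called whenever a new chunk starts, otherwise the cache is stale
      // and computeChecksum(data, true) will reject the now-smaller buffer.
      static void onNewChunk(Checksum cacheEnabledChecksum) {
        cacheEnabledChecksum.clearChecksumCache();
      }
    }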
@@ -106,6 +127,24 @@ Function newChecksumFunction() { public Checksum(ChecksumType type, int bytesPerChecksum) { this.checksumType = type; this.bytesPerChecksum = bytesPerChecksum; + this.checksumCache = null; + } + + /** + * Constructs a Checksum object. + * @param type type of Checksum + * @param bytesPerChecksum number of bytes of data per checksum + * @param allowChecksumCache true to enable checksum cache + */ + public Checksum(ChecksumType type, int bytesPerChecksum, boolean allowChecksumCache) { + this.checksumType = type; + this.bytesPerChecksum = bytesPerChecksum; + LOG.debug("allowChecksumCache = {}", allowChecksumCache); + if (allowChecksumCache) { + this.checksumCache = new ChecksumCache(bytesPerChecksum); + } else { + this.checksumCache = null; + } } /** @@ -128,13 +167,25 @@ public ChecksumData computeChecksum(byte[] data) return computeChecksum(ByteBuffer.wrap(data)); } + /** + * The default implementation of computeChecksum(ByteBuffer) that does not use cache, even if cache is initialized. + * This is a stop-gap solution before the protocol change. + * @param data ByteBuffer + * @return ChecksumData + * @throws OzoneChecksumException + */ + public ChecksumData computeChecksum(ByteBuffer data) + throws OzoneChecksumException { + return computeChecksum(data, false); + } + /** * Computes checksum for give data. * @param data input data. * @return ChecksumData computed for input data. * @throws OzoneChecksumException thrown when ChecksumType is not recognized */ - public ChecksumData computeChecksum(ByteBuffer data) + public ChecksumData computeChecksum(ByteBuffer data, boolean useChecksumCache) throws OzoneChecksumException { // If type is set to NONE, we do not need to compute the checksums. We also // need to avoid unnecessary conversions. @@ -144,7 +195,7 @@ public ChecksumData computeChecksum(ByteBuffer data) if (!data.isReadOnly()) { data = data.asReadOnlyBuffer(); } - return computeChecksum(ChunkBuffer.wrap(data)); + return computeChecksum(ChunkBuffer.wrap(data), useChecksumCache); } public ChecksumData computeChecksum(List byteStrings) @@ -154,8 +205,20 @@ public ChecksumData computeChecksum(List byteStrings) return computeChecksum(ChunkBuffer.wrap(buffers)); } + /** + * The default implementation of computeChecksum(ChunkBuffer) that does not use cache, even if cache is initialized. + * This is a stop-gap solution before the protocol change. + * @param data ChunkBuffer + * @return ChecksumData + * @throws OzoneChecksumException + */ public ChecksumData computeChecksum(ChunkBuffer data) throws OzoneChecksumException { + return computeChecksum(data, false); + } + + public ChecksumData computeChecksum(ChunkBuffer data, boolean useCache) + throws OzoneChecksumException { if (checksumType == ChecksumType.NONE) { // Since type is set to NONE, we do not need to compute the checksums return new ChecksumData(checksumType, bytesPerChecksum); @@ -168,12 +231,20 @@ public ChecksumData computeChecksum(ChunkBuffer data) throw new OzoneChecksumException(checksumType); } - // Checksum is computed for each bytesPerChecksum number of bytes of data - // starting at offset 0. The last checksum might be computed for the - // remaining data with length less than bytesPerChecksum. 
- final List checksumList = new ArrayList<>(); - for (ByteBuffer b : data.iterate(bytesPerChecksum)) { - checksumList.add(computeChecksum(b, function, bytesPerChecksum)); + final List checksumList; + if (checksumCache == null || !useCache) { + // When checksumCache is not enabled: + // Checksum is computed for each bytesPerChecksum number of bytes of data + // starting at offset 0. The last checksum might be computed for the + // remaining data with length less than bytesPerChecksum. + checksumList = new ArrayList<>(); + for (ByteBuffer b : data.iterate(bytesPerChecksum)) { + checksumList.add(computeChecksum(b, function, bytesPerChecksum)); // merge this? + } + } else { + // When checksumCache is enabled: + // We only need to update the last checksum in the cache, then pass it along. + checksumList = checksumCache.computeChecksum(data, function); } return new ChecksumData(checksumType, bytesPerChecksum, checksumList); } @@ -185,7 +256,7 @@ public ChecksumData computeChecksum(ChunkBuffer data) * @param maxLength the max length of data * @return computed checksum ByteString */ - private static ByteString computeChecksum(ByteBuffer data, + protected static ByteString computeChecksum(ByteBuffer data, Function function, int maxLength) { final int limit = data.limit(); try { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java index 1d596bf7007..a5235978327 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java @@ -44,12 +44,14 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { static { Field f = null; - try { - f = ByteBuffer.class - .getDeclaredField("isReadOnly"); - f.setAccessible(true); - } catch (NoSuchFieldException e) { - LOG.error("No isReadOnly field in ByteBuffer", e); + if (JavaUtils.isJavaVersionAtMost(8)) { + try { + f = ByteBuffer.class + .getDeclaredField("isReadOnly"); + f.setAccessible(true); + } catch (NoSuchFieldException e) { + LOG.error("No isReadOnly field in ByteBuffer", e); + } } IS_READY_ONLY_FIELD = f; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java new file mode 100644 index 00000000000..0f6482919a3 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; + +/** + * Cache previous checksums to avoid recomputing them. + * This is a stop-gap solution to reduce checksum calc overhead inside critical section + * without having to do a major refactoring/overhaul over protobuf and interfaces. + * This is only supposed to be used by BlockOutputStream, for now. + *

    + * Each BlockOutputStream has its own Checksum instance. + * Each block chunk (4 MB default) is divided into 16 KB (default) each for checksum calculation. + * For CRC32/CRC32C, each checksum takes 4 bytes. Thus each block chunk has 4 MB / 16 KB * 4 B = 1 KB of checksum data. + */ +public class ChecksumCache { + public static final Logger LOG = LoggerFactory.getLogger(ChecksumCache.class); + + private final int bytesPerChecksum; + private final List checksums; + // Chunk length last time the checksum is computed + private int prevChunkLength; + // This only serves as a hint for array list initial allocation. The array list will still grow as needed. + private static final int BLOCK_CHUNK_SIZE = 4 * 1024 * 1024; // 4 MB + + public ChecksumCache(int bytesPerChecksum) { + LOG.info("Initializing ChecksumCache with bytesPerChecksum = {}", bytesPerChecksum); + this.prevChunkLength = 0; + this.bytesPerChecksum = bytesPerChecksum; + // Set initialCapacity to avoid costly resizes + this.checksums = new ArrayList<>(BLOCK_CHUNK_SIZE / bytesPerChecksum); + } + + /** + * Clear cached checksums. And reset the written index. + */ + public void clear() { + prevChunkLength = 0; + checksums.clear(); + } + + public List getChecksums() { + return checksums; + } + + public List computeChecksum(ChunkBuffer data, Function function) { + // Indicates how much data the current chunk buffer holds + final int currChunkLength = data.limit(); + + if (currChunkLength == prevChunkLength) { + LOG.debug("ChunkBuffer data limit same as last time ({}). No new checksums need to be computed", prevChunkLength); + return checksums; + } + + // Sanity check + if (currChunkLength < prevChunkLength) { + // If currChunkLength <= lastChunkLength, it indicates a bug that needs to be addressed. + // It means BOS has not properly clear()ed the cache when a new chunk is started in that code path. + throw new IllegalArgumentException("ChunkBuffer data limit (" + currChunkLength + ")" + + " must not be smaller than last time (" + prevChunkLength + ")"); + } + + // One or more checksums need to be computed + + // Start of the checksum index that need to be (re)computed + final int ciStart = prevChunkLength / bytesPerChecksum; + final int ciEnd = currChunkLength / bytesPerChecksum + (currChunkLength % bytesPerChecksum == 0 ? 0 : 1); + int i = 0; + for (ByteBuffer b : data.iterate(bytesPerChecksum)) { + if (i < ciStart) { + i++; + continue; + } + + // variable i can either point to: + // 1. the last element in the list -- in which case the checksum needs to be updated + // 2. one after the last element -- in which case a new checksum needs to be added + assert i == checksums.size() - 1 || i == checksums.size(); + + // TODO: Furthermore for CRC32/CRC32C, it can be even more efficient by updating the last checksum byte-by-byte. 
+ final ByteString checksum = Checksum.computeChecksum(b, function, bytesPerChecksum); + if (i == checksums.size()) { + checksums.add(checksum); + } else { + checksums.set(i, checksum); + } + + i++; + } + + // Sanity check + if (i != ciEnd) { + throw new IllegalStateException("ChecksumCache: Checksum index end does not match expectation"); + } + + // Update last written index + prevChunkLength = currChunkLength; + return checksums; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index 058934c2f27..a24d39e5dac 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -39,13 +39,12 @@ static ChunkBuffer allocate(int capacity) { return allocate(capacity, 0); } - /** - * Similar to {@link ByteBuffer#allocate(int)} + /** Similar to {@link ByteBuffer#allocate(int)} * except that it can specify the increment. * * @param increment * the increment size so that this buffer is allocated incrementally. - * When increment <= 0, entire buffer is allocated in the beginning. + * When increment {@literal <= 0}, entire buffer is allocated in the beginning. */ static ChunkBuffer allocate(int capacity, int increment) { if (increment > 0 && increment < capacity) { @@ -60,7 +59,8 @@ static ChunkBuffer wrap(ByteBuffer buffer) { return new ChunkBufferImplWithByteBuffer(buffer); } - /** Wrap the given list of {@link ByteBuffer}s as a {@link ChunkBuffer}. */ + /** Wrap the given list of {@link ByteBuffer}s as a {@link ChunkBuffer}, + * with a function called when buffers are released.*/ static ChunkBuffer wrap(List buffers) { Objects.requireNonNull(buffers, "buffers == null"); if (buffers.size() == 1) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java index 36c16e92bf0..254be93dc4a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java @@ -25,9 +25,9 @@ import java.util.List; import java.util.NoSuchElementException; import java.util.Objects; -import java.util.UUID; import java.util.function.Function; +import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.util.UncheckedAutoCloseable; @@ -35,7 +35,6 @@ final class ChunkBufferImplWithByteBuffer implements ChunkBuffer { private final ByteBuffer buffer; private final UncheckedAutoCloseable underlying; - private final UUID identity = UUID.randomUUID(); ChunkBufferImplWithByteBuffer(ByteBuffer buffer) { this(buffer, null); @@ -104,7 +103,7 @@ public List asByteBufferList() { @Override public long writeTo(GatheringByteChannel channel) throws IOException { - return channel.write(buffer); + return BufferUtils.writeFully(channel, buffer); } @Override @@ -163,6 +162,6 @@ public int hashCode() { @Override public String toString() { return getClass().getSimpleName() + ":limit=" + buffer.limit() - + "@" + identity; + + "@" + Integer.toHexString(super.hashCode()); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java index a3b5f9d2eef..e1f169662f8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java @@ -23,6 +23,8 @@ import java.util.Collections; import java.util.Iterator; import java.util.NoSuchElementException; + +import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import java.io.IOException; @@ -246,9 +248,9 @@ public List asByteBufferList() { @Override public long writeTo(GatheringByteChannel channel) throws IOException { - long bytes = channel.write(buffers.toArray(new ByteBuffer[0])); + final long written = BufferUtils.writeFully(channel, buffers); findCurrent(); - return bytes; + return written; } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java index dda4fae0d2b..732af4b6850 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java @@ -19,6 +19,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import java.io.IOException; @@ -279,7 +280,7 @@ public List asByteBufferList() { @Override public long writeTo(GatheringByteChannel channel) throws IOException { - return channel.write(buffers.toArray(new ByteBuffer[0])); + return BufferUtils.writeFully(channel, buffers); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java index c6ad754f19b..a266c3615b0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java @@ -19,15 +19,23 @@ package org.apache.hadoop.ozone.common.utils; import com.google.common.base.Preconditions; + +import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.GatheringByteChannel; import java.util.ArrayList; import java.util.List; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Utilities for buffers. */ public final class BufferUtils { + public static final Logger LOG = LoggerFactory.getLogger(BufferUtils.class); + + private static final ByteBuffer[] EMPTY_BYTE_BUFFER_ARRAY = {}; /** Utility classes should not be constructed. **/ private BufferUtils() { @@ -136,4 +144,46 @@ public static int getNumberOfBins(long numElements, int maxElementsPerBin) { } return Math.toIntExact(n); } + + /** + * Write all remaining bytes in buffer to the given channel. 
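The writeFully helpers introduced just below exist because GatheringByteChannel.write may return after a partial write, so callers must loop until the buffer is drained. A self-contained sketch of that drain loop using only the JDK (independent of the BufferUtils helpers added here):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.GatheringByteChannel;

    final class WriteFullySketch {
      // Keep writing until no bytes remain; a single write() call is allowed
      // to write fewer bytes than requested. The negative-count check is
      // purely defensive, mirroring the helper in this patch.
      static long writeFully(GatheringByteChannel channel, ByteBuffer buffer) throws IOException {
        long written = 0;
        while (buffer.hasRemaining()) {
          final int n = channel.write(buffer);
          if (n < 0) {
            throw new IOException("Unexpected negative write count: " + n);
          }
          written += n;
        }
        return written;
      }
    }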
+ */ + public static long writeFully(GatheringByteChannel ch, ByteBuffer bb) throws IOException { + long written = 0; + while (bb.remaining() > 0) { + int n = ch.write(bb); + if (n < 0) { + throw new IllegalStateException("GatheringByteChannel.write returns " + n + " < 0 for " + ch); + } + written += n; + } + return written; + } + + public static long writeFully(GatheringByteChannel ch, List buffers) throws IOException { + return BufferUtils.writeFully(ch, buffers.toArray(EMPTY_BYTE_BUFFER_ARRAY)); + } + + public static long writeFully(GatheringByteChannel ch, ByteBuffer[] buffers) throws IOException { + if (LOG.isDebugEnabled()) { + for (int i = 0; i < buffers.length; i++) { + LOG.debug("buffer[{}]: remaining={}", i, buffers[i].remaining()); + } + } + + long written = 0; + for (int i = 0; i < buffers.length; i++) { + while (buffers[i].remaining() > 0) { + final long n = ch.write(buffers, i, buffers.length - i); + if (LOG.isDebugEnabled()) { + LOG.debug("buffer[{}]: remaining={}, written={}", i, buffers[i].remaining(), n); + } + if (n < 0) { + throw new IllegalStateException("GatheringByteChannel.write returns " + n + " < 0 for " + ch); + } + written += n; + } + } + return written; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java index 4bd170df8e8..4fee39921b6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.Proto3Codec; +import org.apache.hadoop.ozone.OzoneConsts; import java.io.IOException; import java.util.Collections; @@ -38,7 +39,8 @@ public class BlockData { private static final Codec CODEC = new DelegatedCodec<>( Proto3Codec.get(ContainerProtos.BlockData.getDefaultInstance()), BlockData::getFromProtoBuf, - BlockData::getProtoBufMessage); + BlockData::getProtoBufMessage, + BlockData.class); public static Codec getCodec() { return CODEC; @@ -252,7 +254,7 @@ public void setChunks(List chunks) { size = singleChunk.getLen(); } else { chunkList = chunks; - size = chunks.parallelStream() + size = chunks.stream() .mapToLong(ContainerProtos.ChunkInfo::getLen) .sum(); } @@ -280,4 +282,14 @@ public void appendTo(StringBuilder sb) { sb.append(", size=").append(size); sb.append("]"); } + + public long getBlockGroupLength() { + String lenStr = getMetadata() + .get(OzoneConsts.BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK); + // If we don't have the length, then it indicates a problem with the stripe. + // All replica should carry the length, so if it is not there, we return 0, + // which will cause us to set the length of the block to zero and not + // attempt to reconstruct it. + return (lenStr == null) ? 
0 : Long.parseLong(lenStr); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java index fdf40af9e09..ab5d39e9c3d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java @@ -27,7 +27,7 @@ /** * Helper class to convert between protobuf lists and Java lists of - * {@link ContainerProtos.ChunkInfo} objects. + * {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo} objects. *

    * This class is immutable. */ @@ -36,6 +36,7 @@ public class ChunkInfoList { Proto3Codec.get(ContainerProtos.ChunkInfoList.getDefaultInstance()), ChunkInfoList::getFromProtoBuf, ChunkInfoList::getProtoBufMessage, + ChunkInfoList.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { @@ -49,7 +50,7 @@ public ChunkInfoList(List chunks) { } /** - * @return A new {@link ChunkInfoList} created from protobuf data. + * @return A new {@link #ChunkInfoList} created from protobuf data. */ public static ChunkInfoList getFromProtoBuf( ContainerProtos.ChunkInfoList chunksProto) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java index 6bd83b44a93..6e0dde66986 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java @@ -33,6 +33,7 @@ import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.util.MetricUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -100,6 +101,8 @@ public static synchronized GrpcMetrics create(Configuration conf) { */ public void unRegister() { DefaultMetricsSystem.instance().unregisterSource(SOURCE_NAME); + MetricUtil.stop(grpcProcessingTimeMillisQuantiles); + MetricUtil.stop(grpcQueueTimeMillisQuantiles); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java index 83e63a2a322..b94dd024b2d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java @@ -37,18 +37,16 @@ /** * Generic factory which stores different instances of Type 'T' sharded by - * a key & version. A single key can be associated with different versions + * a key and version. A single key can be associated with different versions * of 'T'. - * * Why does this class exist? * A typical use case during upgrade is to have multiple versions of a class * / method / object and chose them based on current layout * version at runtime. Before finalizing, an older version is typically * needed, and after finalize, a newer version is needed. This class serves * this purpose in a generic way. - * * For example, we can create a Factory to create multiple versions of - * OMRequests sharded by Request Type & Layout Version Supported. + * OMRequests sharded by Request Type and Layout Version Supported. */ public class LayoutVersionInstanceFactory { @@ -71,7 +69,7 @@ public class LayoutVersionInstanceFactory { /** * Register an instance with a given factory key (key + version). * For safety reasons we dont allow (1) re-registering, (2) registering an - * instance with version > SLV. + * instance with version > SLV. * * @param lvm LayoutVersionManager * @param key VersionFactoryKey key to associate with instance. @@ -138,13 +136,15 @@ private boolean isValid(LayoutVersionManager lvm, int version) { } /** + *

        * From the list of versioned instances for a given "key", this
        * returns the "floor" value corresponding to the given version.
-   * For example, if we have key = "CreateKey",  entry -> [(1, CreateKeyV1),
-   * (3, CreateKeyV2), and if the passed in key = CreateKey & version = 2, we
+   * For example, if we have key = "CreateKey",  entry -> [(1, CreateKeyV1),
+   * (3, CreateKeyV2), and if the passed in key = CreateKey &amp; version = 2, we
        * return CreateKeyV1.
        * Since this is a priority queue based implementation, we use a O(1) peek()
        * lookup to get the current valid version.
    +   * 
    * @param lvm LayoutVersionManager * @param key Key and Version. * @return instance. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java index 3137d756e6b..a765c2c9455 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java @@ -74,7 +74,6 @@ public interface LayoutVersionManager { /** * Generic API for returning a registered handler for a given type. * @param type String type - * @return */ default Object getHandler(String type) { return null; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java index 44ae94870e3..19c0498aa7a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java @@ -50,14 +50,14 @@ public interface UpgradeFinalizer { * Represents the current state in which the service is with regards to * finalization after an upgrade. * The state transitions are the following: - * ALREADY_FINALIZED - no entry no exit from this status without restart. + * {@code ALREADY_FINALIZED} - no entry no exit from this status without restart. * After an upgrade: - * FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION - * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE from finalization done + * {@code FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION + * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE} from finalization done * there is no more move possible, after a restart the service can end up in: - * - FINALIZATION_REQUIRED, if the finalization failed and have not reached - * FINALIZATION_DONE, - * - or it can be ALREADY_FINALIZED if the finalization was successfully done. + * {@code FINALIZATION_REQUIRED}, if the finalization failed and have not reached + * {@code FINALIZATION_DONE}, + * - or it can be {@code ALREADY_FINALIZED} if the finalization was successfully done. */ enum Status { ALREADY_FINALIZED, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java index bda45f5a745..6465cc85501 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java @@ -20,7 +20,7 @@ /** * "Key" element to the Version specific instance factory. Currently it has 2 - * dimensions -> a 'key' string and a version. This is to support a factory + * dimensions -> a 'key' string and a version. This is to support a factory * which returns an instance for a given "key" and "version". 
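To make the floor-lookup behaviour described in the LayoutVersionInstanceFactory javadoc above concrete, here is a standalone sketch that reproduces the same semantics with a plain TreeMap; it deliberately avoids the real factory API, whose full signatures are not shown in this hunk.

import java.util.TreeMap;

// Illustration only: models "versioned instances for a key" as a TreeMap and
// resolves a request the way the javadoc describes (highest registered version
// that does not exceed the current layout version).
public class FloorLookupSketch {
  public static void main(String[] args) {
    TreeMap<Integer, String> createKeyHandlers = new TreeMap<>();
    createKeyHandlers.put(1, "CreateKeyV1");
    createKeyHandlers.put(3, "CreateKeyV2");

    int currentLayoutVersion = 2;
    // floorEntry(2) returns the entry for version 1, i.e. CreateKeyV1.
    System.out.println(createKeyHandlers.floorEntry(currentLayoutVersion).getValue());
  }
}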
*/ public class VersionFactoryKey { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/MetricUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/MetricUtil.java index 23ff3c0f29e..9d903b900ac 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/MetricUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/MetricUtil.java @@ -110,4 +110,20 @@ public static List createQuantiles(MetricsRegistry registry, sampleName, valueName, interval); }).collect(Collectors.toList()); } + + public static void stop(MutableQuantiles... quantiles) { + if (quantiles != null) { + stop(Arrays.asList(quantiles)); + } + } + + public static void stop(Iterable quantiles) { + if (quantiles != null) { + for (MutableQuantiles q : quantiles) { + if (q != null) { + q.stop(); + } + } + } + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetrics.java index 3f5150bd62c..39e887eaa49 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetrics.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetrics.java @@ -22,7 +22,9 @@ import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableStat; +import java.io.Closeable; import java.util.List; +import java.util.Map; /** * The {@code PerformanceMetrics} class encapsulates a collection of related @@ -30,7 +32,7 @@ * This class provides methods to update these metrics and to * snapshot their values for reporting. */ -public class PerformanceMetrics { +public class PerformanceMetrics implements Closeable { private final MutableStat stat; private final List quantiles; private final MutableMinMax minMax; @@ -43,12 +45,13 @@ public class PerformanceMetrics { * @param intervals the intervals for quantiles computation. Note, each * interval in 'intervals' increases memory usage, as it corresponds * to a separate quantile calculator. + * @return {@link PerformanceMetrics} instances created, mapped by field name */ - public static synchronized void initializeMetrics(T source, + public static synchronized Map initializeMetrics(T source, MetricsRegistry registry, String sampleName, String valueName, int[] intervals) { try { - PerformanceMetricsInitializer.initialize( + return PerformanceMetricsInitializer.initialize( source, registry, sampleName, valueName, intervals); } catch (IllegalAccessException e) { throw new RuntimeException("Failed to initialize PerformanceMetrics", e); @@ -73,6 +76,11 @@ public PerformanceMetrics( minMax = new MutableMinMax(registry, name, description, valueName); } + @Override + public void close() { + MetricUtil.stop(quantiles); + } + /** * Adds a value to all the aggregated metrics. 
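The new MetricUtil.stop(...) helpers and the Closeable PerformanceMetrics establish a common shutdown pattern for quantile metrics. Below is a minimal sketch of that pattern using only the signatures visible in this diff; the owning class itself is hypothetical.

import java.io.Closeable;
import java.util.List;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.ozone.util.MetricUtil;

// Hypothetical metrics holder: whatever owns MutableQuantiles can release their
// background sampling tasks on unregister/close via MetricUtil.stop, as
// GrpcMetrics.unRegister() and PerformanceMetrics.close() now do.
class QuantileOwner implements Closeable {
  private final List<MutableQuantiles> quantiles;

  QuantileOwner(List<MutableQuantiles> quantiles) {
    this.quantiles = quantiles;
  }

  @Override
  public void close() {
    MetricUtil.stop(quantiles); // null-safe per the new helper
  }
}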
* @@ -95,6 +103,5 @@ public void snapshot(MetricsRecordBuilder recordBuilder, boolean all) { this.quantiles.forEach(quantile -> quantile.snapshot(recordBuilder, all)); this.minMax.snapshot(recordBuilder, all); } - } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetricsInitializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetricsInitializer.java index b2e83bb780c..cb6f77e9f5c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetricsInitializer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetricsInitializer.java @@ -21,6 +21,8 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry; import java.lang.reflect.Field; +import java.util.HashMap; +import java.util.Map; /** * Utility class for initializing PerformanceMetrics in a MetricsSource. @@ -36,11 +38,13 @@ private PerformanceMetricsInitializer() { } * @param sampleName sample name * @param valueName value name * @param intervals intervals for quantiles + * @return {@link PerformanceMetrics} instances created, mapped by field name * @throws IllegalAccessException if unable to access the field */ - public static void initialize(T source, MetricsRegistry registry, + public static Map initialize(T source, MetricsRegistry registry, String sampleName, String valueName, int[] intervals) throws IllegalAccessException { + Map instances = new HashMap<>(); Field[] fields = source.getClass().getDeclaredFields(); for (Field field : fields) { @@ -54,8 +58,11 @@ public static void initialize(T source, MetricsRegistry registry, sampleName, valueName, intervals); field.setAccessible(true); field.set(source, performanceMetrics); + instances.put(name, performanceMetrics); } } } + + return instances; } } diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties index bf887021c5b..3ba2c2cbfa2 100644 --- a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties +++ b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties @@ -18,9 +18,6 @@ version=${declared.hdds.version} revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} url=${version-info.scm.uri} srcChecksum=${version-info.source.md5} hadoopProtoc2Version=${proto2.hadooprpc.protobuf.version} diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 20c1bed89be..fdeb5c1c043 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -160,6 +160,13 @@ this not set. Ideally, this should be mapped to a fast disk like an SSD. + + ozone.scm.container.list.max.count + 4096 + OZONE, SCM, CONTAINER + The max number of containers info could be included in + response of ListContainer request. + hdds.datanode.dir @@ -272,16 +279,16 @@ hdds.ratis.snapshot.threshold - 10000 - OZONE, RATIS + 100000 + OZONE, CONTAINER, RATIS Number of transactions after which a ratis snapshot should be taken. hdds.container.ratis.statemachine.max.pending.apply-transactions - 10000 - OZONE, RATIS + 100000 + OZONE, CONTAINER, RATIS Maximum number of pending apply transactions in a data pipeline. The default value is kept same as default snapshot threshold hdds.ratis.snapshot.threshold. @@ -860,6 +867,15 @@ The default read threshold to use memory mapped buffers. 
+ + ozone.chunk.read.mapped.buffer.max.count + 0 + OZONE, SCM, CONTAINER, PERFORMANCE + + The default max count of memory mapped buffers allowed for a DN. + Default 0 means no mapped buffers allowed for data read. + + ozone.scm.container.layout FILE_PER_BLOCK @@ -1561,7 +1577,7 @@ hdds.datanode.metadata.rocksdb.cache.size - 64MB + 1GB OZONE, DATANODE, MANAGEMENT Size of the block metadata cache shared among RocksDB instances on each @@ -3406,14 +3422,6 @@ unhealthy will each have their own limit. - - ozone.client.list.trash.keys.max - 1000 - OZONE, CLIENT - - The maximum number of keys to return for a list trash request. - - ozone.http.basedir @@ -3468,9 +3476,9 @@ ozone.s3g.client.buffer.size OZONE, S3GATEWAY - 4KB + 4MB - The size of the buffer which is for read block. (4KB by default). + The size of the buffer which is for read block. (4MB by default). @@ -3742,6 +3750,15 @@ + + ozone.snapshot.deep.cleaning.enabled + false + OZONE, PERFORMANCE, OM + + Flag to enable/disable snapshot deep cleaning. + + + ozone.scm.event.ContainerReport.thread.pool.size 10 @@ -4224,12 +4241,27 @@ + + ozone.hbase.enhancements.allowed + false + OZONE, OM + + When set to false, server-side HBase enhancement-related Ozone (experimental) features + are disabled (not allowed to be enabled) regardless of whether those configs are set. + + Here is the list of configs and values overridden when this config is set to false: + 1. ozone.fs.hsync.enabled = false + + A warning message will be printed if any of the above configs are overridden by this. + + ozone.fs.hsync.enabled false - OZONE, CLIENT + OZONE, CLIENT, OM - Enable hsync/hflush. By default they are disabled. + Enable hsync/hflush on the Ozone Manager and/or client side. Disabled by default. + Can be enabled only when ozone.hbase.enhancements.allowed = true @@ -4505,19 +4537,31 @@ - ozone.ec.grpc.zerocopy.enabled - true + ozone.om.max.buckets + 100000 + OZONE, OM + + maximum number of buckets across all volumes. + + + + + ozone.volume.io.percentiles.intervals.seconds + 60 OZONE, DATANODE - Specify if zero-copy should be enabled for EC GRPC protocol. + This setting specifies the interval (in seconds) for monitoring percentile performance metrics. + It helps in tracking the read and write performance of DataNodes in real-time, + allowing for better identification and analysis of performance issues. + - ozone.om.max.buckets - 100000 + ozone.om.server.list.max.size + 1000 OZONE, OM - maximum number of buckets across all volumes. + Configuration property to configure the max server side response size for list calls on om. 
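These keys would normally be set in ozone-site.xml; the sketch below shows the same opt-in programmatically, reflecting the gating described above (ozone.fs.hsync.enabled only takes effect when ozone.hbase.enhancements.allowed is also true). Key names are taken from the descriptions above; the snippet itself is illustrative.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class HsyncOptInSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Both default to false; enabling hsync without the enhancements gate
    // would be overridden back to false and logged with a warning.
    conf.setBoolean("ozone.hbase.enhancements.allowed", true);
    conf.setBoolean("ozone.fs.hsync.enabled", true);
    System.out.println(conf.getBoolean("ozone.fs.hsync.enabled", false));
  }
}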
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java index aeb1e207e70..78465fd2816 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java @@ -17,12 +17,16 @@ */ package org.apache.hadoop.hdds.protocol; +import com.google.common.collect.ImmutableSet; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; +import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.junit.jupiter.api.Test; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.ALL_PORTS; import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.V0_PORTS; @@ -48,21 +52,36 @@ void protoIncludesNewPortsOnlyForV1() { subject.toProto(VERSION_HANDLES_UNKNOWN_DN_PORTS.toProtoValue()); assertPorts(protoV1, ALL_PORTS); } + @Test + void testRequiredPortsProto() { + DatanodeDetails subject = MockDatanodeDetails.randomDatanodeDetails(); + Set requiredPorts = Stream.of(Port.Name.STANDALONE, Port.Name.RATIS) + .collect(Collectors.toSet()); + HddsProtos.DatanodeDetailsProto proto = + subject.toProto(subject.getCurrentVersion(), requiredPorts); + assertPorts(proto, ImmutableSet.copyOf(requiredPorts)); + + HddsProtos.DatanodeDetailsProto ioPortProto = + subject.toProto(subject.getCurrentVersion(), Name.IO_PORTS); + assertPorts(ioPortProto, ImmutableSet.copyOf(Name.IO_PORTS)); + } @Test public void testNewBuilderCurrentVersion() { // test that if the current version is not set (Ozone 1.4.0 and earlier), // it falls back to SEPARATE_RATIS_PORTS_AVAILABLE DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + Set requiredPorts = Stream.of(Port.Name.STANDALONE, Port.Name.RATIS) + .collect(Collectors.toSet()); HddsProtos.DatanodeDetailsProto.Builder protoBuilder = - dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue()); + dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue(), requiredPorts); protoBuilder.clearCurrentVersion(); DatanodeDetails dn2 = DatanodeDetails.newBuilder(protoBuilder.build()).build(); assertEquals(DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(), dn2.getCurrentVersion()); // test that if the current version is set, it is used protoBuilder = - dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue()); + dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue(), requiredPorts); DatanodeDetails dn3 = DatanodeDetails.newBuilder(protoBuilder.build()).build(); assertEquals(DatanodeVersion.CURRENT.toProtoValue(), dn3.getCurrentVersion()); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java index 5571330ee64..d8af0c4d5ab 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java @@ -37,19 +37,19 @@ public class TestExcludeList { public void excludeNodesShouldBeCleanedBasedOnGivenTime() { ExcludeList list = new ExcludeList(10, clock); 
list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) - .setIpAddress("127.0.0.1").setHostName("localhost").addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) + .setIpAddress("127.0.0.1").setHostName("localhost") + .addPort(DatanodeDetails.newStandalonePort(2001)) .build()); assertEquals(1, list.getDatanodes().size()); clock.fastForward(11); assertEquals(0, list.getDatanodes().size()); list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) - .setIpAddress("127.0.0.2").setHostName("localhost").addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) + .setIpAddress("127.0.0.2").setHostName("localhost") + .addPort(DatanodeDetails.newStandalonePort(2001)) .build()); list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) - .setIpAddress("127.0.0.3").setHostName("localhost").addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) + .setIpAddress("127.0.0.3").setHostName("localhost") + .addPort(DatanodeDetails.newStandalonePort(2001)) .build()); assertEquals(2, list.getDatanodes().size()); } @@ -58,8 +58,8 @@ public void excludeNodesShouldBeCleanedBasedOnGivenTime() { public void excludeNodeShouldNotBeCleanedIfExpiryTimeIsZero() { ExcludeList list = new ExcludeList(0, clock); list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) - .setIpAddress("127.0.0.1").setHostName("localhost").addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) + .setIpAddress("127.0.0.1").setHostName("localhost") + .addPort(DatanodeDetails.newStandalonePort(2001)) .build()); assertEquals(1, list.getDatanodes().size()); clock.fastForward(1); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java index ce6f58dadcb..83b68512380 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java @@ -21,8 +21,11 @@ import java.nio.ByteBuffer; import java.nio.channels.GatheringByteChannel; import java.nio.channels.WritableByteChannel; +import java.util.concurrent.ThreadLocalRandom; import static com.google.common.base.Preconditions.checkElementIndex; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * {@link GatheringByteChannel} implementation for testing. Delegates @@ -45,11 +48,32 @@ public long write(ByteBuffer[] srcs, int offset, int length) checkElementIndex(offset, srcs.length, "offset"); checkElementIndex(offset + length - 1, srcs.length, "offset+length"); - long bytes = 0; - for (ByteBuffer b : srcs) { - bytes += write(b); + long fullLength = 0; + for (int i = offset; i < srcs.length; i++) { + fullLength += srcs[i].remaining(); } - return bytes; + if (fullLength <= 0) { + return 0; + } + + // simulate partial write by setting a random partial length + final long partialLength = ThreadLocalRandom.current().nextLong(fullLength + 1); + + long written = 0; + for (int i = offset; i < srcs.length; i++) { + for (final ByteBuffer src = srcs[i]; src.hasRemaining();) { + final long n = partialLength - written; // write at most n bytes + assertThat(n).isGreaterThanOrEqualTo(0); + if (n == 0) { + return written; + } + + final int remaining = src.remaining(); + final int adjustment = remaining <= n ? 
0 : Math.toIntExact(remaining - n); + written += adjustedWrite(src, adjustment); + } + } + return written; } @Override @@ -59,7 +83,40 @@ public long write(ByteBuffer[] srcs) throws IOException { @Override public int write(ByteBuffer src) throws IOException { - return delegate.write(src); + final int remaining = src.remaining(); + if (remaining <= 0) { + return 0; + } + // Simulate partial write by a random adjustment. + final int adjustment = ThreadLocalRandom.current().nextInt(remaining + 1); + return adjustedWrite(src, adjustment); + } + + /** Simulate partial write by the given adjustment. */ + private int adjustedWrite(ByteBuffer src, int adjustment) throws IOException { + assertThat(adjustment).isGreaterThanOrEqualTo(0); + final int remaining = src.remaining(); + if (remaining <= 0) { + return 0; + } + assertThat(adjustment).isLessThanOrEqualTo(remaining); + + final int oldLimit = src.limit(); + final int newLimit = oldLimit - adjustment; + src.limit(newLimit); + assertEquals(newLimit, src.limit()); + final int toWrite = remaining - adjustment; + assertEquals(toWrite, src.remaining()); + + final int written = delegate.write(src); + assertEquals(newLimit, src.limit()); + assertEquals(toWrite - written, src.remaining()); + + src.limit(oldLimit); + assertEquals(oldLimit, src.limit()); + assertEquals(remaining - written, src.remaining()); + + return written; } @Override diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java index 829f4bb150c..7ddb605c0f8 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java @@ -19,7 +19,10 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.nio.ByteBuffer; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -35,23 +38,25 @@ public class TestChecksum { private static final ContainerProtos.ChecksumType CHECKSUM_TYPE_DEFAULT = ContainerProtos.ChecksumType.SHA256; - private Checksum getChecksum(ContainerProtos.ChecksumType type) { + private Checksum getChecksum(ContainerProtos.ChecksumType type, boolean allowChecksumCache) { if (type == null) { type = CHECKSUM_TYPE_DEFAULT; } - return new Checksum(type, BYTES_PER_CHECKSUM); + return new Checksum(type, BYTES_PER_CHECKSUM, allowChecksumCache); } /** * Tests {@link Checksum#verifyChecksum(byte[], ChecksumData)}. */ - @Test - public void testVerifyChecksum() throws Exception { - Checksum checksum = getChecksum(null); + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testVerifyChecksum(boolean useChecksumCache) throws Exception { + Checksum checksum = getChecksum(null, useChecksumCache); int dataLen = 55; byte[] data = RandomStringUtils.randomAlphabetic(dataLen).getBytes(UTF_8); + ByteBuffer byteBuffer = ByteBuffer.wrap(data); - ChecksumData checksumData = checksum.computeChecksum(data); + ChecksumData checksumData = checksum.computeChecksum(byteBuffer, useChecksumCache); // A checksum is calculate for each bytesPerChecksum number of bytes in // the data. 
Since that value is 10 here and the data length is 55, we @@ -65,11 +70,13 @@ public void testVerifyChecksum() throws Exception { /** * Tests that if data is modified, then the checksums should not match. */ - @Test - public void testIncorrectChecksum() throws Exception { - Checksum checksum = getChecksum(null); + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testIncorrectChecksum(boolean useChecksumCache) throws Exception { + Checksum checksum = getChecksum(null, useChecksumCache); byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes(UTF_8); - ChecksumData originalChecksumData = checksum.computeChecksum(data); + ByteBuffer byteBuffer = ByteBuffer.wrap(data); + ChecksumData originalChecksumData = checksum.computeChecksum(byteBuffer, useChecksumCache); // Change the data and check if new checksum matches the original checksum. // Modifying one byte of data should be enough for the checksum data to @@ -83,13 +90,14 @@ public void testIncorrectChecksum() throws Exception { * Tests that checksum calculated using two different checksumTypes should * not match. */ - @Test - public void testChecksumMismatchForDifferentChecksumTypes() { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testChecksumMismatchForDifferentChecksumTypes(boolean useChecksumCache) { // Checksum1 of type SHA-256 - Checksum checksum1 = getChecksum(null); + Checksum checksum1 = getChecksum(null, useChecksumCache); // Checksum2 of type CRC32 - Checksum checksum2 = getChecksum(ContainerProtos.ChecksumType.CRC32); + Checksum checksum2 = getChecksum(ContainerProtos.ChecksumType.CRC32, useChecksumCache); // The two checksums should not match as they have different types assertNotEquals(checksum1, checksum2, "Checksums should not match for different checksum types"); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumCache.java new file mode 100644 index 00000000000..49e0b75127a --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumCache.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; +import org.apache.hadoop.ozone.common.Checksum.Algorithm; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.function.Function; + +/** + * Test class for {@link ChecksumCache}. + */ +class TestChecksumCache { + public static final Logger LOG = LoggerFactory.getLogger(TestChecksumCache.class); + + @ParameterizedTest + @EnumSource(ChecksumType.class) + void testComputeChecksum(ChecksumType checksumType) throws Exception { + final int bytesPerChecksum = 16; + ChecksumCache checksumCache = new ChecksumCache(bytesPerChecksum); + + final int size = 66; + byte[] byteArray = new byte[size]; + // Fill byteArray with bytes from 0 to 127 for deterministic testing + for (int i = 0; i < size; i++) { + byteArray[i] = (byte) (i % 128); + } + + final Function function = Algorithm.valueOf(checksumType).newChecksumFunction(); + + int iEnd = size / bytesPerChecksum + (size % bytesPerChecksum == 0 ? 0 : 1); + List lastRes = null; + for (int i = 0; i < iEnd; i++) { + int byteBufferLength = Integer.min(byteArray.length, bytesPerChecksum * (i + 1)); + ByteBuffer byteBuffer = ByteBuffer.wrap(byteArray, 0, byteBufferLength); + + try (ChunkBuffer chunkBuffer = ChunkBuffer.wrap(byteBuffer.asReadOnlyBuffer())) { + List res = checksumCache.computeChecksum(chunkBuffer, function); + System.out.println(res); + // Verify that every entry in the res list except the last one is the same as the one in lastRes list + if (i > 0) { + for (int j = 0; j < res.size() - 1; j++) { + Assertions.assertEquals(lastRes.get(j), res.get(j)); + } + } + lastRes = res; + } + } + + // Sanity check + checksumCache.clear(); + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 2b7592e1c35..20372dcc6ea 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -293,18 +293,31 @@ public static Builder newReadChunkRequestBuilder(Pipeline pipeline, */ public static ContainerCommandRequestProto getCreateContainerRequest( long containerID, Pipeline pipeline) throws IOException { + return getCreateContainerRequest(containerID, pipeline, ContainerProtos.ContainerDataProto.State.OPEN); + } + + + /** + * Returns a create container command for test purposes. There are a bunch of + * tests where we need to just send a request and get a reply. + * + * @return ContainerCommandRequestProto. 
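A hypothetical test snippet using the state-accepting ContainerTestHelper overload documented here and declared just below; it shows how a test could request a container that starts in a non-OPEN state (RECOVERING is used only as an example).

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.ozone.container.ContainerTestHelper;

final class CreateContainerRequestSketch {
  private CreateContainerRequestSketch() { }

  // Build a create-container request whose initial state is RECOVERING
  // instead of the previously hard-coded OPEN.
  static ContainerCommandRequestProto newRecoveringContainerRequest(
      long containerID, Pipeline pipeline) throws IOException {
    return ContainerTestHelper.getCreateContainerRequest(
        containerID, pipeline, ContainerProtos.ContainerDataProto.State.RECOVERING);
  }
}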
+ */ + public static ContainerCommandRequestProto getCreateContainerRequest( + long containerID, Pipeline pipeline, ContainerProtos.ContainerDataProto.State state) throws IOException { LOG.trace("addContainer: {}", containerID); - return getContainerCommandRequestBuilder(containerID, pipeline).build(); + return getContainerCommandRequestBuilder(containerID, pipeline, state) + .build(); } private static Builder getContainerCommandRequestBuilder(long containerID, - Pipeline pipeline) throws IOException { + Pipeline pipeline, ContainerProtos.ContainerDataProto.State state) throws IOException { Builder request = ContainerCommandRequestProto.newBuilder(); request.setCmdType(ContainerProtos.Type.CreateContainer); request.setContainerID(containerID); request.setCreateContainer( - ContainerProtos.CreateContainerRequestProto.getDefaultInstance()); + ContainerProtos.CreateContainerRequestProto.getDefaultInstance().toBuilder().setState(state).build()); request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); return request; @@ -320,7 +333,8 @@ public static ContainerCommandRequestProto getCreateContainerSecureRequest( long containerID, Pipeline pipeline, Token token) throws IOException { LOG.trace("addContainer: {}", containerID); - Builder request = getContainerCommandRequestBuilder(containerID, pipeline); + Builder request = getContainerCommandRequestBuilder(containerID, pipeline, + ContainerProtos.ContainerDataProto.State.OPEN); if (token != null) { request.setEncodedToken(token.encodeToUrlString()); } diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 1c71bf3d90a..60c63475ae3 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-config - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Config Tools Apache Ozone HDDS Config jar diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java index b1a20c9aecb..0d6c0c90878 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java @@ -108,7 +108,7 @@ default String[] getTrimmedStrings(String name) { /** * Gets the configuration entries where the key contains the prefix. This * method will strip the prefix from the key in the return Map. - * Example: somePrefix.key->value will be key->value in the returned map. + * Example: {@code somePrefix.key->value} will be {@code key->value} in the returned map. * @param keyPrefix Prefix to search. * @return Map containing keys that match and their values. 
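The prefix-stripping contract described in the ConfigurationSource javadoc above can be pictured with a small standalone sketch (a simple startsWith match is assumed here; this is not the ConfigurationSource implementation).

import java.util.HashMap;
import java.util.Map;

public class PrefixStripSketch {
  static Map<String, String> withPrefixStripped(Map<String, String> props, String keyPrefix) {
    Map<String, String> result = new HashMap<>();
    for (Map.Entry<String, String> e : props.entrySet()) {
      if (e.getKey().startsWith(keyPrefix)) {
        result.put(e.getKey().substring(keyPrefix.length()), e.getValue());
      }
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    props.put("somePrefix.key", "value");
    // Prints {key=value}: somePrefix.key -> value becomes key -> value.
    System.out.println(withPrefixStripped(props, "somePrefix."));
  }
}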
*/ diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index d73bea95895..c21ca8203b5 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-container-service - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Container Service Apache Ozone HDDS Container Service jar diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java index d36fcdb6fc7..9c077a8e27b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java @@ -26,4 +26,32 @@ */ @InterfaceAudience.Private public interface DNMXBean extends ServiceRuntimeInfo { + + /** + * Gets the datanode hostname. + * + * @return the datanode hostname for the datanode. + */ + String getHostname(); + + /** + * Gets the client rpc port. + * + * @return the client rpc port + */ + String getClientRpcPort(); + + /** + * Gets the http port. + * + * @return the http port + */ + String getHttpPort(); + + /** + * Gets the https port. + * + * @return the http port + */ + String getHttpsPort(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java index f7b484c6bb3..5a0a4556636 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java @@ -25,8 +25,53 @@ * This is the JMX management class for DN information. 
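The new DNMXBean attributes (hostname and ports) become readable over JMX once the bean is registered. The sketch below reads one attribute through the platform MBean server; the ObjectName used here is an assumption for illustration, not necessarily the name the datanode registers.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class DnJmxReadSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Assumed ObjectName; check the datanode's JMX endpoint for the real registration name.
    ObjectName name = new ObjectName("Hadoop:service=HddsDatanode,name=DNInfo");
    Object hostname = server.getAttribute(name, "Hostname");
    System.out.println("Datanode hostname: " + hostname);
  }
}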
*/ public class DNMXBeanImpl extends ServiceRuntimeInfoImpl implements DNMXBean { - public DNMXBeanImpl( - VersionInfo versionInfo) { + + private String hostName; + private String clientRpcPort; + private String httpPort; + private String httpsPort; + + public DNMXBeanImpl(VersionInfo versionInfo) { super(versionInfo); } + + @Override + public String getHostname() { + return hostName; + } + + @Override + public String getClientRpcPort() { + return clientRpcPort; + } + + @Override + public String getHttpPort() { + return httpPort; + } + + @Override + public String getHttpsPort() { + return httpsPort; + } + + public void setHttpPort(String httpPort) { + this.httpPort = httpPort; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + public void setClientRpcPort(String rpcPort) { + this.clientRpcPort = rpcPort; + } + + public String getHostName() { + return hostName; + } + + public void setHttpsPort(String httpsPort) { + this.httpsPort = httpsPort; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 6b32b74dc7c..de21e37503a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.symmetric.DefaultSecretKeyClient; import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; @@ -116,8 +117,7 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { private final Map ratisMetricsMap = new ConcurrentHashMap<>(); private List ratisReporterList = null; - private DNMXBeanImpl serviceRuntimeInfo = - new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { }; + private DNMXBeanImpl serviceRuntimeInfo; private ObjectName dnInfoBeanName; private HddsDatanodeClientProtocolServer clientProtocolServer; private OzoneAdmins admins; @@ -210,6 +210,12 @@ public void start(OzoneConfiguration configuration) { } public void start() { + serviceRuntimeInfo = new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { + @Override + public String getNamespace() { + return SCMHAUtils.getScmServiceId(conf); + } + }; serviceRuntimeInfo.setStartTime(); ratisReporterList = RatisDropwizardExports @@ -222,13 +228,13 @@ public void start() { String ip = InetAddress.getByName(hostname).getHostAddress(); datanodeDetails = initializeDatanodeDetails(); datanodeDetails.setHostName(hostname); + serviceRuntimeInfo.setHostName(hostname); datanodeDetails.setIpAddress(ip); datanodeDetails.setVersion( HddsVersionInfo.HDDS_VERSION_INFO.getVersion()); datanodeDetails.setSetupTime(Time.now()); datanodeDetails.setRevision( HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); - datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); TracingUtil.initTracing( "HddsDatanodeService." 
+ datanodeDetails.getUuidString() .substring(0, 8), conf); @@ -295,23 +301,30 @@ public void start() { httpServer = new HddsDatanodeHttpServer(conf); httpServer.start(); HttpConfig.Policy policy = HttpConfig.getHttpPolicy(conf); + if (policy.isHttpEnabled()) { - datanodeDetails.setPort(DatanodeDetails.newPort(HTTP, - httpServer.getHttpAddress().getPort())); + int httpPort = httpServer.getHttpAddress().getPort(); + datanodeDetails.setPort(DatanodeDetails.newPort(HTTP, httpPort)); + serviceRuntimeInfo.setHttpPort(String.valueOf(httpPort)); } + if (policy.isHttpsEnabled()) { - datanodeDetails.setPort(DatanodeDetails.newPort(HTTPS, - httpServer.getHttpsAddress().getPort())); + int httpsPort = httpServer.getHttpAddress().getPort(); + datanodeDetails.setPort(DatanodeDetails.newPort(HTTPS, httpsPort)); + serviceRuntimeInfo.setHttpsPort(String.valueOf(httpsPort)); } + } catch (Exception ex) { LOG.error("HttpServer failed to start.", ex); } - clientProtocolServer = new HddsDatanodeClientProtocolServer( datanodeDetails, conf, HddsVersionInfo.HDDS_VERSION_INFO, reconfigurationHandler); + int clientRpcport = clientProtocolServer.getClientRpcAddress().getPort(); + serviceRuntimeInfo.setClientRpcPort(String.valueOf(clientRpcport)); + // Get admin list String starterUser = UserGroupInformation.getCurrentUser().getShortUserName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java index eeed4fab5f7..52217ce7f83 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java @@ -24,7 +24,7 @@ import org.apache.hadoop.security.authorize.Service; import org.apache.ratis.util.MemoizedSupplier; -import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.function.Supplier; @@ -50,7 +50,7 @@ public static HddsPolicyProvider getInstance() { } private static final List DN_SERVICES = - Arrays.asList( + Collections.singletonList( new Service( OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, ReconfigureProtocol.class) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java index 969add4a15c..3c08e58f9bf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/DNContainerOperationClient.java @@ -33,9 +33,7 @@ import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; import jakarta.annotation.Nonnull; import org.apache.hadoop.ozone.container.common.helpers.TokenHelper; @@ -73,11 +71,7 @@ private static XceiverClientManager createClientManager( throws IOException { ClientTrustManager trustManager = null; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - 
CACertificateProvider localCaCerts = - () -> HAUtils.buildCAX509List(certificateClient, conf); - CACertificateProvider remoteCacerts = - () -> HAUtils.buildCAX509List(null, conf); - trustManager = new ClientTrustManager(remoteCacerts, localCaCerts); + trustManager = certificateClient.createClientTrustManager(); } DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); return new XceiverClientManager(conf, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java index ac42efd45ad..5d949e90b19 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/checksum/ReconcileContainerTask.java @@ -69,6 +69,16 @@ protected Object getCommandForDebug() { return command.toString(); } + @Override + protected String getMetricName() { + return "ContainerReconciliations"; + } + + @Override + protected String getMetricDescriptionSegment() { + return "Container Reconciliations"; + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java index 7487f757fe5..80c390f3b83 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java @@ -55,8 +55,7 @@ public final class BlockDeletingServiceMetrics { @Metric(about = "The total number of DeleteBlockTransaction received") private MutableCounterLong receivedTransactionCount; - @Metric(about = "The total number of DeleteBlockTransaction" + - " that is a retry Transaction") + @Metric(about = "The total number of DeleteBlockTransaction that is a retry Transaction") private MutableCounterLong receivedRetryTransactionCount; @Metric(about = "The total number of Container received to be processed") @@ -74,10 +73,15 @@ public final class BlockDeletingServiceMetrics { @Metric(about = "The total number of Container chosen to be deleted.") private MutableGaugeLong totalContainerChosenCount; - @Metric(about = "The total number of transactions which failed due" + - " to container lock wait timeout.") + @Metric(about = "The total number of transactions which failed due to container lock wait timeout.") private MutableGaugeLong totalLockTimeoutTransactionCount; + @Metric(about = "The number of delete block transactions successful.") + private MutableCounterLong processedTransactionSuccessCount; + + @Metric(about = "The number of delete block transactions failed.") + private MutableGaugeLong processedTransactionFailCount; + private BlockDeletingServiceMetrics() { } @@ -112,6 +116,14 @@ public void incrFailureCount() { this.failureCount.incr(); } + public void incrProcessedTransactionSuccessCount(long count) { + processedTransactionSuccessCount.incr(count); + } + + public void incrProcessedTransactionFailCount(long count) { + processedTransactionFailCount.incr(count); + } + public void incrReceivedTransactionCount(long count) { 
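A hedged sketch (not the actual block-deleting task code) of how a caller could feed the two new transaction-outcome counters added to BlockDeletingServiceMetrics above:

import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics;

final class DeleteTxnOutcomeSketch {
  private DeleteTxnOutcomeSketch() { }

  static void recordOutcome(BlockDeletingServiceMetrics metrics, long succeeded, long failed) {
    if (succeeded > 0) {
      metrics.incrProcessedTransactionSuccessCount(succeeded);
    }
    if (failed > 0) {
      metrics.incrProcessedTransactionFailCount(failed);
    }
  }
}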
receivedTransactionCount.incr(count); } @@ -184,6 +196,14 @@ public long getTotalLockTimeoutTransactionCount() { return totalLockTimeoutTransactionCount.value(); } + public long getProcessedTransactionSuccessCount() { + return processedTransactionSuccessCount.value(); + } + + public long getProcessedTransactionFailCount() { + return processedTransactionFailCount.value(); + } + @Override public String toString() { StringBuffer buffer = new StringBuffer(); @@ -202,6 +222,10 @@ public String toString() { + receivedTransactionCount.value()).append("\t") .append("receivedRetryTransactionCount = " + receivedRetryTransactionCount.value()).append("\t") + .append("processedTransactionSuccessCount = " + + processedTransactionSuccessCount.value()).append("\t") + .append("processedTransactionFailCount = " + + processedTransactionFailCount.value()).append("\t") .append("receivedContainerCount = " + receivedContainerCount.value()).append("\t") .append("receivedBlockCount = " diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java index a6e4d6258d9..e52565952a5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java @@ -34,6 +34,7 @@ import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.TotalRunTimeMs; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.QueueWaitingTaskCount; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.InvocationCount; +import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.AvgRunTimeMs; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.ThreadPoolActivePoolSize; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.ThreadPoolMaxPoolSize; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.CommandReceivedCount; @@ -46,6 +47,7 @@ public final class CommandHandlerMetrics implements MetricsSource { enum CommandMetricsMetricsInfo implements MetricsInfo { Command("The type of the SCM command"), TotalRunTimeMs("The total runtime of the command handler in milliseconds"), + AvgRunTimeMs("Average run time of the command handler in milliseconds"), QueueWaitingTaskCount("The number of queued tasks waiting for execution"), InvocationCount("The number of times the command handler has been invoked"), ThreadPoolActivePoolSize("The number of active threads in the thread pool"), @@ -108,6 +110,7 @@ public void getMetrics(MetricsCollector collector, boolean all) { commandHandler.getCommandType().name()); builder.addGauge(TotalRunTimeMs, commandHandler.getTotalRunTime()); + builder.addGauge(AvgRunTimeMs, commandHandler.getAverageRunTime()); builder.addGauge(QueueWaitingTaskCount, commandHandler.getQueuedCount()); builder.addGauge(InvocationCount, commandHandler.getInvocationCount()); int activePoolSize = commandHandler.getThreadPoolActivePoolSize(); diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java index 91bdb17cda9..03dbce061bb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java @@ -30,7 +30,9 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableRate; +import org.apache.hadoop.ozone.util.MetricUtil; +import java.io.Closeable; import java.util.EnumMap; /** @@ -47,7 +49,7 @@ */ @InterfaceAudience.Private @Metrics(about = "Storage Container DataNode Metrics", context = "dfs") -public class ContainerMetrics { +public class ContainerMetrics implements Closeable { public static final String STORAGE_CONTAINER_METRICS = "StorageContainerMetrics"; @Metric private MutableCounterLong numOps; @@ -109,6 +111,11 @@ public static void remove() { ms.unregisterSource(STORAGE_CONTAINER_METRICS); } + @Override + public void close() { + opsLatQuantiles.values().forEach(MetricUtil::stop); + } + public void incContainerOpsMetrics(ContainerProtos.Type type) { numOps.incr(); numOpsArray.get(type).incr(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java index 5392af1deb2..b1b65dc5850 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java @@ -142,8 +142,7 @@ public BackgroundTaskQueue getTasks() { chooseContainerForBlockDeletion(getBlockLimitPerInterval(), containerDeletionPolicy); - BackgroundTask - containerBlockInfos = null; + BackgroundTask containerBlockInfos = null; long totalBlocks = 0; for (ContainerBlockInfo containerBlockInfo : containers) { BlockDeletingTaskBuilder builder = @@ -155,13 +154,11 @@ public BackgroundTaskQueue getTasks() { containerBlockInfos = builder.build(); queue.add(containerBlockInfos); totalBlocks += containerBlockInfo.getNumBlocksToDelete(); + LOG.debug("Queued- Container: {}, deleted blocks: {}", + containerBlockInfo.getContainerData().getContainerID(), containerBlockInfo.getNumBlocksToDelete()); } metrics.incrTotalBlockChosenCount(totalBlocks); metrics.incrTotalContainerChosenCount(containers.size()); - if (containers.size() > 0) { - LOG.debug("Queued {} blocks from {} containers for deletion", - totalBlocks, containers.size()); - } } catch (StorageContainerException e) { LOG.warn("Failed to initiate block deleting tasks, " + "caused by unable to get containers info. 
" diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index b5dfd07d576..8dd35064e6b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -23,8 +23,12 @@ import com.google.common.collect.ImmutableMap; import com.google.protobuf.Message; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; + import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.utils.db.InMemoryTestTable; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.utils.ContainerLogger; @@ -65,10 +69,24 @@ public class ContainerSet implements Iterable> { new ConcurrentSkipListMap<>(); private Clock clock; private long recoveringTimeout; + private final Table containerIdsTable; + @VisibleForTesting public ContainerSet(long recoveringTimeout) { + this(new InMemoryTestTable<>(), recoveringTimeout); + } + + public ContainerSet(Table continerIdsTable, long recoveringTimeout) { + this(continerIdsTable, recoveringTimeout, false); + } + + public ContainerSet(Table continerIdsTable, long recoveringTimeout, boolean readOnly) { this.clock = Clock.system(ZoneOffset.UTC); + this.containerIdsTable = continerIdsTable; this.recoveringTimeout = recoveringTimeout; + if (!readOnly && containerIdsTable == null) { + throw new IllegalArgumentException("Container table cannot be null when container set is not read only"); + } } public long getCurrentTime() { @@ -85,22 +103,64 @@ public void setRecoveringTimeout(long recoveringTimeout) { this.recoveringTimeout = recoveringTimeout; } + /** + * Add Container to container map. This would fail if the container is already present or has been marked as missing. + * @param container container to be added + * @return If container is added to containerMap returns true, otherwise + * false + */ + public boolean addContainer(Container container) throws StorageContainerException { + return addContainer(container, false); + } + + /** + * Add Container to container map. This would overwrite the container even if it is missing. But would fail if the + * container is already present. + * @param container container to be added + * @return If container is added to containerMap returns true, otherwise + * false + */ + public boolean addContainerByOverwriteMissingContainer(Container container) throws StorageContainerException { + return addContainer(container, true); + } + + public void ensureContainerNotMissing(long containerId, State state) throws StorageContainerException { + if (missingContainerSet.contains(containerId)) { + throw new StorageContainerException(String.format("Container with container Id %d with state : %s is missing in" + + " the DN.", containerId, state), + ContainerProtos.Result.CONTAINER_MISSING); + } + } + /** * Add Container to container map. 
* @param container container to be added + * @param overwrite if true should overwrite the container if the container was missing. * @return If container is added to containerMap returns true, otherwise * false */ - public boolean addContainer(Container container) throws + private boolean addContainer(Container container, boolean overwrite) throws StorageContainerException { Preconditions.checkNotNull(container, "container cannot be null"); long containerId = container.getContainerData().getContainerID(); + State containerState = container.getContainerData().getState(); + if (!overwrite) { + ensureContainerNotMissing(containerId, containerState); + } if (containerMap.putIfAbsent(containerId, container) == null) { if (LOG.isDebugEnabled()) { LOG.debug("Container with container Id {} is added to containerMap", containerId); } + try { + if (containerIdsTable != null) { + containerIdsTable.put(containerId, containerState.toString()); + } + } catch (IOException e) { + throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); + } + missingContainerSet.remove(containerId); // wish we could have done this from ContainerData.setState container.getContainerData().commitSpace(); if (container.getContainerData().getState() == RECOVERING) { @@ -122,21 +182,69 @@ public boolean addContainer(Container container) throws * @return Container */ public Container getContainer(long containerId) { - Preconditions.checkState(containerId >= 0, - "Container Id cannot be negative."); + Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); return containerMap.get(containerId); } + /** + * Removes container from both memory and database. This should be used when the containerData on disk has been + * removed completely from the node. + * @param containerId + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ + public boolean removeContainer(long containerId) throws StorageContainerException { + return removeContainer(containerId, false, true); + } + + /** + * Removes containerId from memory. This needs to be used when the container is still present on disk, and the + * inmemory state of the container needs to be updated. + * @param containerId + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ + public boolean removeContainerOnlyFromMemory(long containerId) throws StorageContainerException { + return removeContainer(containerId, false, false); + } + + /** + * Marks a container to be missing, thus it removes the container from inmemory containerMap and marks the + * container as missing. + * @param containerId + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ + public boolean removeMissingContainer(long containerId) throws StorageContainerException { + return removeContainer(containerId, true, false); + } + /** * Removes the Container matching with specified containerId. 
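The three removal flavours introduced above target different situations; the sketch below restates their intended use in code form (semantics taken from the javadoc, error handling omitted).

import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;

final class ContainerRemovalSketch {
  private ContainerRemovalSketch() { }

  static void removalFlavours(ContainerSet containerSet, long id) throws StorageContainerException {
    // Container data fully deleted from disk: drop it from memory and the ID table.
    containerSet.removeContainer(id);
    // Container still on disk; only the in-memory view is being refreshed.
    containerSet.removeContainerOnlyFromMemory(id);
    // Container lost (e.g. volume failure): forget it in memory and mark it missing.
    containerSet.removeMissingContainer(id);
  }
}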
* @param containerId ID of the container to remove * @return If container is removed from containerMap returns true, otherwise * false */ - public boolean removeContainer(long containerId) { + private boolean removeContainer(long containerId, boolean markMissing, boolean removeFromDB) + throws StorageContainerException { Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); + //We need to add to missing container set before removing containerMap since there could be write chunk operation + // that could recreate the container in another volume if we remove it from the map before adding to missing + // container. + if (markMissing) { + missingContainerSet.add(containerId); + } Container removed = containerMap.remove(containerId); + if (removeFromDB) { + try { + if (containerIdsTable != null) { + containerIdsTable.delete(containerId); + } + } catch (IOException e) { + throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); + } + } if (removed == null) { LOG.debug("Container with containerId {} is not present in " + "containerMap", containerId); @@ -189,22 +297,21 @@ public int containerCount() { * Send FCR which will not contain removed containers. * * @param context StateContext - * @return */ - public void handleVolumeFailures(StateContext context) { + public void handleVolumeFailures(StateContext context) throws StorageContainerException { AtomicBoolean failedVolume = new AtomicBoolean(false); AtomicInteger containerCount = new AtomicInteger(0); - containerMap.values().forEach(c -> { + for (Container c : containerMap.values()) { ContainerData data = c.getContainerData(); if (data.getVolume().isFailed()) { - removeContainer(data.getContainerID()); + removeMissingContainer(data.getContainerID()); LOG.debug("Removing Container {} as the Volume {} " + - "has failed", data.getContainerID(), data.getVolume()); + "has failed", data.getContainerID(), data.getVolume()); failedVolume.set(true); containerCount.incrementAndGet(); ContainerLogger.logLost(data, "Volume failure"); } - }); + } if (failedVolume.get()) { try { @@ -252,6 +359,21 @@ public Iterator> getContainerIterator(HddsVolume volume) { .iterator(); } + /** + * Get the number of containers based on the given volume. + * + * @param volume hdds volume. + * @return number of containers + */ + public long containerCount(HddsVolume volume) { + Preconditions.checkNotNull(volume); + Preconditions.checkNotNull(volume.getStorageID()); + String volumeUuid = volume.getStorageID(); + return containerMap.values().stream() + .filter(x -> volumeUuid.equals(x.getContainerData().getVolume() + .getStorageID())).count(); + } + /** * Return an containerMap iterator over {@link ContainerSet#containerMap}. 
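// Illustrative sketch, not part of the patch: handleVolumeFailures() above now marks containers
// on a failed volume as missing instead of dropping them outright, and containerCount(HddsVolume)
// counts containers by the volume's storage id. A plain-collections version of both, with
// hypothetical types standing in for Container / HddsVolume.
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class VolumeFailureSweep {
  record ContainerInfo(long id, String volumeId, boolean volumeFailed) { }

  private final Set<Long> missing = ConcurrentHashMap.newKeySet();

  /** Returns how many containers were marked missing because their volume failed. */
  int handleVolumeFailures(List<ContainerInfo> all) {
    int lost = 0;
    for (ContainerInfo c : all) {
      if (c.volumeFailed()) {
        missing.add(c.id());         // mark missing first; the real code then logs the loss
        lost++;
      }
    }
    return lost;
  }

  /** Mirrors containerCount(HddsVolume): containers whose data lives on the given volume. */
  static long containerCount(List<ContainerInfo> all, String volumeStorageId) {
    return all.stream()
        .filter(c -> volumeStorageId.equals(c.volumeId()))
        .count();
  }
}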
* @return containerMap Iterator @@ -348,6 +470,10 @@ public Set getMissingContainerSet() { return missingContainerSet; } + public Table getContainerIdsTable() { + return containerIdsTable; + } + /** * Builds the missing container set by taking a diff between total no * containers actually found and number of containers which actually diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 417fb443eef..cd99b909231 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -177,7 +177,8 @@ private boolean canIgnoreException(Result result) { case CONTAINER_UNHEALTHY: case CLOSED_CONTAINER_IO: case DELETE_ON_OPEN_CONTAINER: - case UNSUPPORTED_REQUEST: // Blame client for sending unsupported request. + case UNSUPPORTED_REQUEST:// Blame client for sending unsupported request. + case CONTAINER_MISSING: return true; default: return false; @@ -278,7 +279,8 @@ private ContainerCommandResponseProto dispatchRequest( getMissingContainerSet().remove(containerID); } } - if (getMissingContainerSet().contains(containerID)) { + if (cmdType != Type.CreateContainer && !HddsUtils.isReadOnly(msg) + && getMissingContainerSet().contains(containerID)) { StorageContainerException sce = new StorageContainerException( "ContainerID " + containerID + " has been lost and cannot be recreated on this DataNode", @@ -649,7 +651,7 @@ public Handler getHandler(ContainerProtos.ContainerType containerType) { @Override public void setClusterId(String clusterId) { - Preconditions.checkNotNull(clusterId, "clusterId Cannot be null"); + Preconditions.checkNotNull(clusterId, "clusterId cannot be null"); if (this.clusterId == null) { this.clusterId = clusterId; for (Map.Entry handlerMap : handlers.entrySet()) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java index d6ca2d120e6..2e11cde3d9e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java @@ -35,7 +35,7 @@ /** * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}). * The outer container map does not entail locking for a better performance. - * The inner {@link BlockDataMap} is synchronized. + * The inner {@code BlockDataMap} is synchronized. 
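// Illustrative sketch, not part of the patch: the HddsDispatcher hunk above narrows the
// missing-container rejection so CreateContainer and read-only requests still go through.
// The enum and read-only set below are simplified stand-ins for ContainerProtos.Type and
// HddsUtils.isReadOnly(msg).
import java.util.Set;

final class MissingContainerGate {
  enum CmdType { CreateContainer, ReadChunk, GetBlock, ListBlock, WriteChunk, PutBlock }

  private static final Set<CmdType> READ_ONLY =
      Set.of(CmdType.ReadChunk, CmdType.GetBlock, CmdType.ListBlock);

  /** True when the request must fail because the container has been lost on this datanode. */
  static boolean shouldReject(CmdType type, long containerId, Set<Long> missingContainers) {
    return type != CmdType.CreateContainer
        && !READ_ONLY.contains(type)
        && missingContainers.contains(containerId);
  }

  private MissingContainerGate() { }
}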
* * This class will maintain list of open keys per container when closeContainer * command comes, it should autocommit all open keys of a open container before diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java index c584ba79037..bb47b5b9b6f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java @@ -90,7 +90,7 @@ public final List chooseContainerForBlockDeletion( /** * Abstract step for ordering the container data to be deleted. * Subclass need to implement the concrete ordering implementation - * in descending order (more prioritized -> less prioritized) + * in descending order (more prioritized -> less prioritized) * @param candidateContainers candidate containers to be ordered */ protected abstract void orderByDescendingPriority( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java index d02bae0a35a..f075b6f67ca 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java @@ -75,7 +75,6 @@ void validateContainerCommand( /** * Returns the handler for the specified containerType. 
* @param containerType - * @return */ Handler getHandler(ContainerProtos.ContainerType containerType); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java index 77a4d97878d..fb9dc49071b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java @@ -23,6 +23,7 @@ import java.io.OutputStream; import java.util.Set; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -96,7 +97,8 @@ public abstract StateMachine.DataChannel getStreamDataChannel( * * @return datanode Id */ - protected String getDatanodeId() { + @VisibleForTesting + public String getDatanodeId() { return datanodeId; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 55fcbcdb3cc..9d157cc9912 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -218,7 +218,6 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService, ReplicationSupervisorMetrics.create(supervisor); ecReconstructionMetrics = ECReconstructionMetrics.create(); - ecReconstructionCoordinator = new ECReconstructionCoordinator( conf, certClient, secretKeyClient, context, ecReconstructionMetrics, threadNamePrefix); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java index a6c3b11de92..b3854e7ecd2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java @@ -234,12 +234,17 @@ public void logIfNeeded(Exception ex) { } if (missCounter == 0) { + long missedDurationSeconds = TimeUnit.MILLISECONDS.toSeconds( + this.getMissedCount() * getScmHeartbeatInterval(this.conf) + ); LOG.warn( - "Unable to communicate to {} server at {} for past {} seconds.", - serverName, - getAddress().getHostString() + ":" + getAddress().getPort(), - TimeUnit.MILLISECONDS.toSeconds(this.getMissedCount() * - getScmHeartbeatInterval(this.conf)), ex); + "Unable to communicate to {} server at {}:{} for past {} seconds.", + serverName, + address.getAddress(), + address.getPort(), + missedDurationSeconds, + ex + ); } if (LOG.isTraceEnabled()) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java index 8533f7384d4..cd032d4b275 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java @@ -18,7 +18,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -32,6 +31,8 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; @@ -58,11 +59,11 @@ public class CloseContainerCommandHandler implements CommandHandler { private final AtomicLong invocationCount = new AtomicLong(0); private final AtomicInteger queuedCount = new AtomicInteger(0); - private final ExecutorService executor; - private long totalTime; + private final ThreadPoolExecutor executor; + private final MutableRate opsLatencyMs; /** - * Constructs a ContainerReport handler. + * Constructs a close container command handler. */ public CloseContainerCommandHandler( int threadPoolSize, int queueSize, String threadNamePrefix) { @@ -73,6 +74,9 @@ public CloseContainerCommandHandler( new ThreadFactoryBuilder() .setNameFormat(threadNamePrefix + "CloseContainerThread-%d") .build()); + MetricsRegistry registry = new MetricsRegistry( + CloseContainerCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.closeContainerCommand + "Ms"); } /** @@ -156,7 +160,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, LOG.error("Can't close container #{}", containerId, e); } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -205,19 +209,26 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override public int getQueuedCount() { return queuedCount.get(); } + + @Override + public int getThreadPoolMaxPoolSize() { + return executor.getMaximumPoolSize(); + } + + @Override + public int getThreadPoolActivePoolSize() { + return executor.getActiveCount(); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java index 241abb6f4ae..be39277fdfa 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -60,9 +62,9 @@ public class ClosePipelineCommandHandler implements CommandHandler { private final AtomicLong invocationCount = new AtomicLong(0); private final AtomicInteger queuedCount = new AtomicInteger(0); - private long totalTime; private final Executor executor; private final BiFunction newRaftClient; + private final MutableRate opsLatencyMs; /** * Constructs a closePipelineCommand handler. @@ -80,6 +82,9 @@ public ClosePipelineCommandHandler( Executor executor) { this.newRaftClient = newRaftClient; this.executor = executor; + MetricsRegistry registry = new MetricsRegistry( + ClosePipelineCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.closePipelineCommand + "Ms"); } /** @@ -155,7 +160,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, } } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -187,15 +192,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java index 9035b79c670..c3f8da74c7a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java @@ -56,11 +56,6 @@ public final class CommandDispatcher { private CommandDispatcher(OzoneContainer container, SCMConnectionManager connectionManager, StateContext context, CommandHandler... 
handlers) { - Preconditions.checkNotNull(context); - Preconditions.checkNotNull(handlers); - Preconditions.checkArgument(handlers.length > 0); - Preconditions.checkNotNull(container); - Preconditions.checkNotNull(connectionManager); this.context = context; this.container = container; this.connectionManager = connectionManager; @@ -77,6 +72,7 @@ private CommandDispatcher(OzoneContainer container, SCMConnectionManager commandHandlerMetrics = CommandHandlerMetrics.create(handlerMap); } + @VisibleForTesting public CommandHandler getCloseContainerHandler() { return handlerMap.get(Type.closeContainerCommand); } @@ -201,11 +197,12 @@ public Builder setContext(StateContext stateContext) { * @return Command Dispatcher. */ public CommandDispatcher build() { - Preconditions.checkNotNull(this.connectionManager, "Missing connection" + - " manager."); - Preconditions.checkNotNull(this.container, "Missing container."); - Preconditions.checkNotNull(this.context, "Missing context."); - Preconditions.checkArgument(this.handlerList.size() > 0); + Preconditions.checkNotNull(this.connectionManager, + "Missing scm connection manager."); + Preconditions.checkNotNull(this.container, "Missing ozone container."); + Preconditions.checkNotNull(this.context, "Missing state context."); + Preconditions.checkArgument(this.handlerList.size() > 0, + "The number of command handlers must be greater than 0."); return new CommandDispatcher(this.container, this.connectionManager, this.context, handlerList.toArray( new CommandHandler[handlerList.size()])); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java index 4a36a1987de..62fc8a919d8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; @@ -59,8 +61,8 @@ public class CreatePipelineCommandHandler implements CommandHandler { private final AtomicInteger queuedCount = new AtomicInteger(0); private final BiFunction newRaftClient; - private long totalTime; private final Executor executor; + private final MutableRate opsLatencyMs; /** * Constructs a createPipelineCommand handler. 
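// Illustrative sketch, not part of the patch: every command handler in this patch replaces a
// hand-rolled totalTime / invocationCount average with a MutableRate, which tracks count, total
// and mean in one thread-safe metric. The standalone class below uses only the calls that appear
// in the patch (MetricsRegistry#newRate, MutableRate#add, MutableRate#lastStat); the class name
// and metric label are made up for the example.
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;

class CommandLatencyExample {
  private final MutableRate opsLatencyMs;

  CommandLatencyExample() {
    MetricsRegistry registry = new MetricsRegistry(CommandLatencyExample.class.getSimpleName());
    this.opsLatencyMs = registry.newRate("exampleCommandMs");
  }

  void recordInvocation(long elapsedMillis) {
    opsLatencyMs.add(elapsedMillis);                 // replaces: totalTime += endTime - startTime
  }

  long getAverageRunTime() {
    return (long) opsLatencyMs.lastStat().mean();    // replaces: totalTime / invocationCount
  }

  long getTotalRunTime() {
    return (long) opsLatencyMs.lastStat().total();
  }
}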
@@ -75,6 +77,9 @@ public CreatePipelineCommandHandler(ConfigurationSource conf, Executor executor) { this.newRaftClient = newRaftClient; this.executor = executor; + MetricsRegistry registry = new MetricsRegistry( + CreatePipelineCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.createPipelineCommand + "Ms"); } /** @@ -135,7 +140,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, } } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -167,15 +172,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 747749066e3..6a158f51023 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; @@ -91,7 +93,6 @@ public class DeleteBlocksCommandHandler implements CommandHandler { private final ContainerSet containerSet; private final ConfigurationSource conf; private int invocationCount; - private long totalTime; private final ThreadPoolExecutor executor; private final LinkedBlockingQueue deleteCommandQueues; private final Daemon handlerThread; @@ -99,6 +100,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler { private final BlockDeletingServiceMetrics blockDeleteMetrics; private final long tryLockTimeoutMs; private final Map schemaHandlers; + private final MutableRate opsLatencyMs; public DeleteBlocksCommandHandler(OzoneContainer container, ConfigurationSource conf, DatanodeConfiguration dnConf, @@ -121,6 +123,9 @@ public DeleteBlocksCommandHandler(OzoneContainer container, dnConf.getBlockDeleteThreads(), threadFactory); this.deleteCommandQueues = new LinkedBlockingQueue<>(dnConf.getBlockDeleteQueueLimit()); + MetricsRegistry registry = new MetricsRegistry( + DeleteBlocksCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.deleteBlocksCommand + "Ms"); long interval = dnConf.getBlockDeleteCommandWorkerInterval().toMillis(); handlerThread = new Daemon(new DeleteCmdWorker(interval)); handlerThread.start(); @@ -168,12 +173,12 @@ public int getQueuedCount() { @Override 
public int getThreadPoolMaxPoolSize() { - return ((ThreadPoolExecutor)executor).getMaximumPoolSize(); + return executor.getMaximumPoolSize(); } @Override public int getThreadPoolActivePoolSize() { - return ((ThreadPoolExecutor)executor).getActiveCount(); + return executor.getActiveCount(); } /** @@ -354,10 +359,11 @@ private void processCmd(DeleteCmdInfo cmd) { DeletedContainerBlocksSummary summary = DeletedContainerBlocksSummary.getFrom(containerBlocks); LOG.info("Summary of deleting container blocks, numOfTransactions={}, " - + "numOfContainers={}, numOfBlocks={}", + + "numOfContainers={}, numOfBlocks={}, commandId={}.", summary.getNumOfTxs(), summary.getNumOfContainers(), - summary.getNumOfBlocks()); + summary.getNumOfBlocks(), + cmd.getCmd().getId()); if (LOG.isDebugEnabled()) { LOG.debug("Start to delete container blocks, TXIDs={}", summary.getTxIDSummary()); @@ -384,7 +390,14 @@ private void processCmd(DeleteCmdInfo cmd) { LOG.debug("Sending following block deletion ACK to SCM"); for (DeleteBlockTransactionResult result : blockDeletionACK .getResultsList()) { - LOG.debug("{} : {}", result.getTxID(), result.getSuccess()); + boolean success = result.getSuccess(); + LOG.debug("TxId = {} : ContainerId = {} : {}", + result.getTxID(), result.getContainerID(), success); + if (success) { + blockDeleteMetrics.incrProcessedTransactionSuccessCount(1); + } else { + blockDeleteMetrics.incrProcessedTransactionFailCount(1); + } } } } @@ -403,7 +416,7 @@ private void processCmd(DeleteCmdInfo cmd) { }; updateCommandStatus(cmd.getContext(), cmd.getCmd(), statusUpdater, LOG); long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); invocationCount++; } } @@ -666,15 +679,12 @@ public int getInvocationCount() { @Override public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java index ead81c32e5b..59aaacc1c80 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java @@ -22,6 +22,8 @@ import java.util.concurrent.RejectedExecutionException; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -36,11 +38,9 @@ import java.io.IOException; import java.time.Clock; import java.util.OptionalLong; -import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; /** * Handler to process 
the DeleteContainerCommand from SCM. @@ -52,10 +52,10 @@ public class DeleteContainerCommandHandler implements CommandHandler { private final AtomicInteger invocationCount = new AtomicInteger(0); private final AtomicInteger timeoutCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); - private final ExecutorService executor; + private final ThreadPoolExecutor executor; private final Clock clock; private int maxQueueSize; + private final MutableRate opsLatencyMs; public DeleteContainerCommandHandler( int threadPoolSize, Clock clock, int queueSize, String threadNamePrefix) { @@ -70,10 +70,13 @@ public DeleteContainerCommandHandler( } protected DeleteContainerCommandHandler(Clock clock, - ExecutorService executor, int queueSize) { + ThreadPoolExecutor executor, int queueSize) { this.executor = executor; this.clock = clock; maxQueueSize = queueSize; + MetricsRegistry registry = new MetricsRegistry( + DeleteContainerCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.deleteContainerCommand + "Ms"); } @Override public void handle(final SCMCommand command, @@ -125,13 +128,13 @@ private void handleInternal(SCMCommand command, StateContext context, } catch (IOException e) { LOG.error("Exception occurred while deleting the container.", e); } finally { - totalTime.getAndAdd(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } } @Override public int getQueuedCount() { - return ((ThreadPoolExecutor)executor).getQueue().size(); + return executor.getQueue().size(); } @Override @@ -150,14 +153,22 @@ public int getTimeoutCount() { @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? 
- 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); + } + + @Override + public int getThreadPoolMaxPoolSize() { + return executor.getMaximumPoolSize(); + } + + @Override + public int getThreadPoolActivePoolSize() { + return executor.getActiveCount(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java index bd7ec5710d9..77e152447b9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.FinalizeNewLayoutVersionCommandProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; @@ -42,12 +44,15 @@ public class FinalizeNewLayoutVersionCommandHandler implements CommandHandler { LoggerFactory.getLogger(FinalizeNewLayoutVersionCommandHandler.class); private AtomicLong invocationCount = new AtomicLong(0); - private long totalTime; + private final MutableRate opsLatencyMs; /** * Constructs a FinalizeNewLayoutVersionCommandHandler. 
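// Illustrative sketch, not part of the patch: several handlers above change the executor field
// from ExecutorService to ThreadPoolExecutor so pool gauges can be exposed without casts. A
// minimal standalone version of those accessors plus the queue-depth gauge; pool sizes are
// arbitrary example values.
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class HandlerPoolGauges {
  private final ThreadPoolExecutor executor = new ThreadPoolExecutor(
      1, 4, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

  int getThreadPoolMaxPoolSize()    { return executor.getMaximumPoolSize(); } // configured ceiling
  int getThreadPoolActivePoolSize() { return executor.getActiveCount(); }     // threads busy right now
  int getQueuedCount()              { return executor.getQueue().size(); }    // commands waiting for a thread
}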
*/ public FinalizeNewLayoutVersionCommandHandler() { + MetricsRegistry registry = new MetricsRegistry( + FinalizeNewLayoutVersionCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.finalizeNewLayoutVersionCommand + "Ms"); } /** @@ -82,7 +87,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, LOG.error("Exception during finalization.", e); } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } } @@ -113,15 +118,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java index 602687d7a00..030d169e9b8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java @@ -36,6 +36,7 @@ public class ReconstructECContainersCommandHandler implements CommandHandler { private final ReplicationSupervisor supervisor; private final ECReconstructionCoordinator coordinator; private final ConfigurationSource conf; + private String metricsName; public ReconstructECContainersCommandHandler(ConfigurationSource conf, ReplicationSupervisor supervisor, @@ -52,8 +53,16 @@ public void handle(SCMCommand command, OzoneContainer container, (ReconstructECContainersCommand) command; ECReconstructionCommandInfo reconstructionCommandInfo = new ECReconstructionCommandInfo(ecContainersCommand); - this.supervisor.addTask(new ECReconstructionCoordinatorTask( - coordinator, reconstructionCommandInfo)); + ECReconstructionCoordinatorTask task = new ECReconstructionCoordinatorTask( + coordinator, reconstructionCommandInfo); + if (this.metricsName == null) { + this.metricsName = task.getMetricName(); + } + this.supervisor.addTask(task); + } + + public String getMetricsName() { + return this.metricsName; } @Override @@ -63,23 +72,26 @@ public Type getCommandType() { @Override public int getInvocationCount() { - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestCount(metricsName); } @Override public long getAverageRunTime() { - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestAvgTime(metricsName); } @Override public long getTotalRunTime() { - return 0; + return this.metricsName == null ? 0 : this.supervisor + .getReplicationRequestTotalTime(metricsName); } @Override public int getQueuedCount() { - return supervisor - .getInFlightReplications(ECReconstructionCoordinatorTask.class); + return this.metricsName == null ? 
0 : (int) this.supervisor + .getReplicationQueuedCount(metricsName); } public ConfigurationSource getConf() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java index 3c14b2fb161..1ab31ba1c41 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java @@ -18,6 +18,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -27,7 +29,6 @@ import org.slf4j.LoggerFactory; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; /** * Command handler to refresh usage info of all volumes. @@ -38,9 +39,12 @@ public class RefreshVolumeUsageCommandHandler implements CommandHandler { LoggerFactory.getLogger(RefreshVolumeUsageCommandHandler.class); private final AtomicInteger invocationCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); + private final MutableRate opsLatencyMs; public RefreshVolumeUsageCommandHandler() { + MetricsRegistry registry = new MetricsRegistry( + RefreshVolumeUsageCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(Type.refreshVolumeUsageInfo + "Ms"); } @Override @@ -50,7 +54,7 @@ public void handle(SCMCommand command, OzoneContainer container, invocationCount.incrementAndGet(); final long startTime = Time.monotonicNow(); container.getVolumeSet().refreshAllVolumeUsage(); - totalTime.getAndAdd(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } @Override @@ -66,14 +70,12 @@ public int getInvocationCount() { @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? 
- 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java index 21b26339e23..242a4eb74be 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java @@ -43,29 +43,28 @@ public class ReplicateContainerCommandHandler implements CommandHandler { static final Logger LOG = LoggerFactory.getLogger(ReplicateContainerCommandHandler.class); - private int invocationCount; - - private long totalTime; - - private ConfigurationSource conf; - private ReplicationSupervisor supervisor; private ContainerReplicator downloadReplicator; private ContainerReplicator pushReplicator; + private String metricsName; + public ReplicateContainerCommandHandler( ConfigurationSource conf, ReplicationSupervisor supervisor, ContainerReplicator downloadReplicator, ContainerReplicator pushReplicator) { - this.conf = conf; this.supervisor = supervisor; this.downloadReplicator = downloadReplicator; this.pushReplicator = pushReplicator; } + public String getMetricsName() { + return this.metricsName; + } + @Override public void handle(SCMCommand command, OzoneContainer container, StateContext context, SCMConnectionManager connectionManager) { @@ -86,12 +85,16 @@ public void handle(SCMCommand command, OzoneContainer container, downloadReplicator : pushReplicator; ReplicationTask task = new ReplicationTask(replicateCommand, replicator); + if (metricsName == null) { + metricsName = task.getMetricName(); + } supervisor.addTask(task); } @Override public int getQueuedCount() { - return supervisor.getInFlightReplications(ReplicationTask.class); + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationQueuedCount(metricsName); } @Override @@ -101,19 +104,19 @@ public SCMCommandProto.Type getCommandType() { @Override public int getInvocationCount() { - return this.invocationCount; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestCount(metricsName); } @Override public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestAvgTime(metricsName); } @Override public long getTotalRunTime() { - return totalTime; + return this.metricsName == null ? 
0 : this.supervisor + .getReplicationRequestTotalTime(metricsName); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java index 6f7f4414eeb..33563624795 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java @@ -21,8 +21,10 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SetNodeOperationalStateCommandProto; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -39,7 +41,6 @@ import java.io.File; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; @@ -54,7 +55,7 @@ public class SetNodeOperationalStateCommandHandler implements CommandHandler { private final ConfigurationSource conf; private final Consumer replicationSupervisor; private final AtomicInteger invocationCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); + private final MutableRate opsLatencyMs; /** * Set Node State command handler. @@ -65,6 +66,9 @@ public SetNodeOperationalStateCommandHandler(ConfigurationSource conf, Consumer replicationSupervisor) { this.conf = conf; this.replicationSupervisor = replicationSupervisor; + MetricsRegistry registry = new MetricsRegistry( + SetNodeOperationalStateCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(Type.setNodeOperationalStateCommand + "Ms"); } /** @@ -80,9 +84,6 @@ public void handle(SCMCommand command, OzoneContainer container, StateContext context, SCMConnectionManager connectionManager) { long startTime = Time.monotonicNow(); invocationCount.incrementAndGet(); - StorageContainerDatanodeProtocolProtos.SetNodeOperationalStateCommandProto - setNodeCmdProto = null; - if (command.getType() != Type.setNodeOperationalStateCommand) { LOG.warn("Skipping handling command, expected command " + "type {} but found {}", @@ -91,7 +92,7 @@ public void handle(SCMCommand command, OzoneContainer container, } SetNodeOperationalStateCommand setNodeCmd = (SetNodeOperationalStateCommand) command; - setNodeCmdProto = setNodeCmd.getProto(); + SetNodeOperationalStateCommandProto setNodeCmdProto = setNodeCmd.getProto(); DatanodeDetails dni = context.getParent().getDatanodeDetails(); HddsProtos.NodeOperationalState state = setNodeCmdProto.getNodeOperationalState(); @@ -106,7 +107,7 @@ public void handle(SCMCommand command, OzoneContainer container, // handler interface. 
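// Illustrative sketch, not part of the patch: the replicate / EC-reconstruct handlers above stop
// keeping their own counters and instead read them from ReplicationSupervisor, keyed by the
// metric name of the first task they enqueue. A standalone version of that lazily-captured-key
// pattern; MiniTask and MiniSupervisor are hypothetical stand-ins for the real classes.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

class LazyMetricKeyHandler {
  interface MiniTask { String getMetricName(); }

  static class MiniSupervisor {
    private final Map<String, LongAdder> requestCount = new ConcurrentHashMap<>();

    void addTask(MiniTask task) {
      requestCount.computeIfAbsent(task.getMetricName(), k -> new LongAdder()).increment();
    }

    long getRequestCount(String metricName) {
      LongAdder counter = requestCount.get(metricName);
      return counter == null ? 0 : counter.sum();
    }
  }

  private final MiniSupervisor supervisor = new MiniSupervisor();
  private volatile String metricsName;   // captured from the first task, as the handlers do

  void handle(MiniTask task) {
    if (metricsName == null) {
      metricsName = task.getMetricName();
    }
    supervisor.addTask(task);
  }

  long getInvocationCount() {
    return metricsName == null ? 0 : supervisor.getRequestCount(metricsName);
  }
}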
} replicationSupervisor.accept(state); - totalTime.addAndGet(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } // TODO - this duplicates code in HddsDatanodeService and InitDatanodeState @@ -125,8 +126,7 @@ private void persistDatanodeDetails(DatanodeDetails dnDetails) * @return Type */ @Override - public StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type - getCommandType() { + public Type getCommandType() { return Type.setNodeOperationalStateCommand; } @@ -147,14 +147,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? - 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index b6ab4748fe3..caa6b9df121 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -495,7 +495,7 @@ public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { /** * Sets the LayoutVersionManager. * - * @param versionMgr - config + * @param lvm config * @return Builder */ public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index 71f95cc4d32..969756b40f8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -244,7 +244,7 @@ public Builder setConfig(ConfigurationSource config) { /** * Sets the LayoutVersionManager. * - * @param versionMgr - config + * @param lvm config * @return Builder. 
*/ public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java index e702b1e6e15..968c9b9a6e6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; import java.io.IOException; +import java.net.BindException; import java.util.concurrent.Callable; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -104,7 +105,7 @@ public EndpointStateMachine.EndPointStates call() throws Exception { LOG.debug("Cannot execute GetVersion task as endpoint state machine " + "is in {} state", rpcEndPoint.getState()); } - } catch (DiskOutOfSpaceException ex) { + } catch (DiskOutOfSpaceException | BindException ex) { rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); } catch (IOException ex) { rpcEndPoint.logIfNeeded(ex); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java index 9c3f29d0f0c..5f1914402d0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite; @@ -31,7 +30,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.InputStream; import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.getSendMethod; @@ -45,28 +43,20 @@ public class GrpcXceiverService extends LOG = LoggerFactory.getLogger(GrpcXceiverService.class); private final ContainerDispatcher dispatcher; - private final boolean zeroCopyEnabled; private final ZeroCopyMessageMarshaller zeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( ContainerCommandRequestProto.getDefaultInstance()); - public GrpcXceiverService(ContainerDispatcher dispatcher, - boolean zeroCopyEnabled) { + public GrpcXceiverService(ContainerDispatcher dispatcher) { this.dispatcher = dispatcher; - this.zeroCopyEnabled = zeroCopyEnabled; } /** - * Bind service with zerocopy marshaller equipped for the `send` API if - * zerocopy is enabled. + * Bind service with zerocopy marshaller equipped for the `send` API. * @return service definition. 
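// Illustrative sketch, not part of the patch: VersionEndpointTask above now treats BindException
// like DiskOutOfSpaceException — both are unrecoverable for the endpoint, so it jumps straight
// to SHUTDOWN instead of retrying. The enum below is a stand-in for
// EndpointStateMachine.EndPointStates.
import java.io.IOException;
import java.net.BindException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

final class EndpointErrorClassifier {
  enum EndpointState { GETVERSION, SHUTDOWN }

  /** Fatal errors stop the endpoint; anything else is logged and retried on the next attempt. */
  static EndpointState onStartupError(IOException ex, EndpointState current) {
    if (ex instanceof DiskOutOfSpaceException || ex instanceof BindException) {
      return EndpointState.SHUTDOWN;
    }
    return current;
  }

  private EndpointErrorClassifier() { }
}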
*/ public ServerServiceDefinition bindServiceWithZeroCopy() { ServerServiceDefinition orig = super.bindService(); - if (!zeroCopyEnabled) { - LOG.info("Zerocopy is not enabled."); - return orig; - } ServerServiceDefinition.Builder builder = ServerServiceDefinition.builder(orig.getServiceDescriptor().getName()); @@ -117,10 +107,7 @@ public void onNext(ContainerCommandRequestProto request) { isClosed.set(true); responseObserver.onError(e); } finally { - InputStream popStream = zeroCopyMessageMarshaller.popStream(request); - if (popStream != null) { - IOUtils.close(LOG, popStream); - } + zeroCopyMessageMarshaller.release(request); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index ad9c5c9d9ca..0d95ac25eda 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.transport.server; import java.io.IOException; +import java.net.BindException; import java.util.Collections; import java.util.List; import java.util.UUID; @@ -29,7 +30,6 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -66,9 +66,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED_DEFAULT; - /** * Creates a Grpc server endpoint that acts as the communication layer for * Ozone containers. @@ -134,13 +131,9 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails, eventLoopGroup = new NioEventLoopGroup(poolSize / 10, factory); channelType = NioServerSocketChannel.class; } - final boolean zeroCopyEnabled = conf.getBoolean( - OZONE_EC_GRPC_ZERO_COPY_ENABLED, - OZONE_EC_GRPC_ZERO_COPY_ENABLED_DEFAULT); LOG.info("GrpcServer channel type {}", channelType.getSimpleName()); - GrpcXceiverService xceiverService = new GrpcXceiverService(dispatcher, - zeroCopyEnabled); + GrpcXceiverService xceiverService = new GrpcXceiverService(dispatcher); NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port) .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) .bossEventLoopGroup(eventLoopGroup) @@ -185,7 +178,16 @@ public HddsProtos.ReplicationType getServerType() { @Override public void start() throws IOException { if (!isStarted) { - server.start(); + try { + server.start(); + } catch (IOException e) { + LOG.error("Error while starting the server", e); + if (e.getMessage().contains("Failed to bind to address")) { + throw new BindException(e.getMessage()); + } else { + throw e; + } + } int realPort = server.getPort(); if (port == 0) { @@ -195,9 +197,7 @@ public void start() throws IOException { } //register the real port to the datanode details. 
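// Illustrative sketch, not part of the patch: the XceiverServerGrpc.start() hunk above re-throws
// a gRPC "Failed to bind to address" IOException as a BindException so the endpoint task can
// recognize it and shut down. GrpcLikeServer is a hypothetical stand-in for the Netty/gRPC
// server object; the real code does not chain the cause, which is added here for readability.
import java.io.IOException;
import java.net.BindException;

final class BindAwareStarter {
  interface GrpcLikeServer { void start() throws IOException; }

  static void startOrRethrowAsBind(GrpcLikeServer server) throws IOException {
    try {
      server.start();
    } catch (IOException e) {
      if (e.getMessage() != null && e.getMessage().contains("Failed to bind to address")) {
        throw (BindException) new BindException(e.getMessage()).initCause(e);
      }
      throw e;
    }
  }

  private BindAwareStarter() { }
}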
- datanodeDetails.setPort(DatanodeDetails - .newPort(Name.STANDALONE, - realPort)); + datanodeDetails.setPort(DatanodeDetails.newStandalonePort(realPort)); isStarted = true; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index b3398de07ad..23be4138b60 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -41,8 +41,9 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; -import java.util.stream.Collectors; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -64,24 +65,24 @@ import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.utils.BufferUtils; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto; import org.apache.ratis.proto.RaftProtos.LogEntryProto; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; import org.apache.ratis.proto.RaftProtos.RoleInfoProto; +import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto; import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientRequest; +import org.apache.ratis.protocol.RaftGroup; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; +import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.RaftServer; @@ -97,10 +98,10 @@ import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat; +import org.apache.ratis.util.JavaUtils; import org.apache.ratis.util.LifeCycle; import org.apache.ratis.util.TaskQueue; import org.apache.ratis.util.function.CheckedSupplier; -import org.apache.ratis.util.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -184,7 +185,6 @@ long getStartTime() { private final SimpleStateMachineStorage storage = new SimpleStateMachineStorage(); - private final RaftGroupId gid; private final ContainerDispatcher dispatcher; private final ContainerController 
containerController; private final XceiverServerRatis ratisServer; @@ -204,6 +204,7 @@ long getStartTime() { private final boolean waitOnBothFollowers; private final HddsDatanodeService datanodeService; private static Semaphore semaphore = new Semaphore(1); + private final AtomicBoolean peersValidated; /** * CSM metrics. @@ -219,7 +220,6 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI ConfigurationSource conf, String threadNamePrefix) { this.datanodeService = hddsDatanodeService; - this.gid = gid; this.dispatcher = dispatcher; this.containerController = containerController; this.ratisServer = ratisServer; @@ -233,7 +233,7 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI // cache with FIFO eviction, and if element not found, this needs // to be obtained from disk for slow follower stateMachineDataCache = new ResourceCache<>( - (index, data) -> ((ByteString)data).size(), + (index, data) -> data.size(), pendingRequestsBytesLimit, (p) -> { if (p.wasEvicted()) { @@ -255,6 +255,7 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); stateMachineHealthy = new AtomicBoolean(true); + this.peersValidated = new AtomicBoolean(false); ThreadFactory threadFactory = new ThreadFactoryBuilder() .setNameFormat( @@ -268,6 +269,19 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI } + private void validatePeers() throws IOException { + if (this.peersValidated.get()) { + return; + } + final RaftGroup group = ratisServer.getServerDivision(getGroupId()).getGroup(); + final RaftPeerId selfId = ratisServer.getServer().getId(); + if (group.getPeer(selfId) == null) { + throw new StorageContainerException("Current datanode " + selfId + " is not a member of " + group, + ContainerProtos.Result.INVALID_CONFIG); + } + peersValidated.set(true); + } + @Override public StateMachineStorage getStateMachineStorage() { return storage; @@ -283,8 +297,9 @@ public void initialize( throws IOException { super.initialize(server, id, raftStorage); storage.init(raftStorage); - ratisServer.notifyGroupAdd(gid); + ratisServer.notifyGroupAdd(id); + LOG.info("{}: initialize {}", server.getId(), id); loadSnapshot(storage.getLatestSnapshot()); } @@ -293,7 +308,7 @@ private long loadSnapshot(SingleFileSnapshotInfo snapshot) if (snapshot == null) { TermIndex empty = TermIndex.valueOf(0, RaftLog.INVALID_LOG_INDEX); LOG.info("{}: The snapshot info is null. 
Setting the last applied index " + - "to:{}", gid, empty); + "to:{}", getGroupId(), empty); setLastAppliedTermIndex(empty); return empty.getIndex(); } @@ -301,7 +316,7 @@ private long loadSnapshot(SingleFileSnapshotInfo snapshot) final File snapshotFile = snapshot.getFile().getPath().toFile(); final TermIndex last = SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile); - LOG.info("{}: Setting the last applied index to {}", gid, last); + LOG.info("{}: Setting the last applied index to {}", getGroupId(), last); setLastAppliedTermIndex(last); // initialize the dispatcher with snapshot so that it build the missing @@ -351,7 +366,7 @@ public long takeSnapshot() throws IOException { long startTime = Time.monotonicNow(); if (!isStateMachineHealthy()) { String msg = - "Failed to take snapshot " + " for " + gid + " as the stateMachine" + "Failed to take snapshot " + " for " + getGroupId() + " as the stateMachine" + " is unhealthy. The last applied index is at " + ti; StateMachineException sme = new StateMachineException(msg); LOG.error(msg); @@ -360,19 +375,19 @@ public long takeSnapshot() throws IOException { if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) { final File snapshotFile = storage.getSnapshotFile(ti.getTerm(), ti.getIndex()); - LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile); + LOG.info("{}: Taking a snapshot at:{} file {}", getGroupId(), ti, snapshotFile); try (FileOutputStream fos = new FileOutputStream(snapshotFile)) { persistContainerSet(fos); fos.flush(); // make sure the snapshot file is synced fos.getFD().sync(); } catch (IOException ioe) { - LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti, + LOG.error("{}: Failed to write snapshot at:{} file {}", getGroupId(), ti, snapshotFile); throw ioe; } LOG.info("{}: Finished taking a snapshot at:{} file:{} took: {} ms", - gid, ti, snapshotFile, (Time.monotonicNow() - startTime)); + getGroupId(), ti, snapshotFile, (Time.monotonicNow() - startTime)); return ti.getIndex(); } return -1; @@ -386,7 +401,7 @@ public TransactionContext startTransaction(LogEntryProto entry, RaftPeerRole rol final StateMachineLogEntryProto stateMachineLogEntry = entry.getStateMachineLogEntry(); final ContainerCommandRequestProto logProto; try { - logProto = getContainerCommandRequestProto(gid, stateMachineLogEntry.getLogData()); + logProto = getContainerCommandRequestProto(getGroupId(), stateMachineLogEntry.getLogData()); } catch (InvalidProtocolBufferException e) { trx.setException(e); return trx; @@ -413,7 +428,7 @@ public TransactionContext startTransaction(RaftClientRequest request) long startTime = Time.monotonicNowNanos(); final ContainerCommandRequestProto proto = message2ContainerCommandRequestProto(request.getMessage()); - Preconditions.checkArgument(request.getRaftGroupId().equals(gid)); + Preconditions.checkArgument(request.getRaftGroupId().equals(getGroupId())); final TransactionContext.Builder builder = TransactionContext.newBuilder() .setClientRequest(request) @@ -449,7 +464,7 @@ public TransactionContext startTransaction(RaftClientRequest request) final WriteChunkRequestProto.Builder commitWriteChunkProto = WriteChunkRequestProto.newBuilder(write) .clearData(); protoBuilder.setWriteChunk(commitWriteChunkProto) - .setPipelineID(gid.getUuid().toString()) + .setPipelineID(getGroupId().getUuid().toString()) .setTraceID(proto.getTraceID()); builder.setStateMachineData(write.getData()); @@ -491,20 +506,20 @@ private static ContainerCommandRequestProto getContainerCommandRequestProto( 
private ContainerCommandRequestProto message2ContainerCommandRequestProto( Message message) throws InvalidProtocolBufferException { - return ContainerCommandRequestMessage.toProto(message.getContent(), gid); + return ContainerCommandRequestMessage.toProto(message.getContent(), getGroupId()); } private ContainerCommandResponseProto dispatchCommand( ContainerCommandRequestProto requestProto, DispatcherContext context) { if (LOG.isTraceEnabled()) { - LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid, + LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", getGroupId(), requestProto.getCmdType(), requestProto.getContainerID(), requestProto.getPipelineID(), requestProto.getTraceID()); } ContainerCommandResponseProto response = dispatcher.dispatch(requestProto, context); if (LOG.isTraceEnabled()) { - LOG.trace("{}: response {}", gid, response); + LOG.trace("{}: response {}", getGroupId(), response); } return response; } @@ -531,7 +546,7 @@ private CompletableFuture writeStateMachineData( RaftServer server = ratisServer.getServer(); Preconditions.checkArgument(!write.getData().isEmpty()); try { - if (server.getDivision(gid).getInfo().isLeader()) { + if (server.getDivision(getGroupId()).getInfo().isLeader()) { stateMachineDataCache.put(entryIndex, write.getData()); } } catch (InterruptedException ioe) { @@ -559,7 +574,7 @@ private CompletableFuture writeStateMachineData( return dispatchCommand(requestProto, context); } catch (Exception e) { LOG.error("{}: writeChunk writeStateMachineData failed: blockId" + - "{} logIndex {} chunkName {}", gid, write.getBlockID(), + "{} logIndex {} chunkName {}", getGroupId(), write.getBlockID(), entryIndex, write.getChunkData().getChunkName(), e); metrics.incNumWriteDataFails(); // write chunks go in parallel. 
It's possible that one write chunk @@ -573,7 +588,7 @@ private CompletableFuture writeStateMachineData( writeChunkFutureMap.put(entryIndex, writeChunkFuture); if (LOG.isDebugEnabled()) { LOG.debug("{}: writeChunk writeStateMachineData : blockId" + - "{} logIndex {} chunkName {}", gid, write.getBlockID(), + "{} logIndex {} chunkName {}", getGroupId(), write.getBlockID(), entryIndex, write.getChunkData().getChunkName()); } // Remove the future once it finishes execution from the @@ -587,7 +602,7 @@ private CompletableFuture writeStateMachineData( && r.getResult() != ContainerProtos.Result.CHUNK_FILE_INCONSISTENCY) { StorageContainerException sce = new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId" + + LOG.error(getGroupId() + ": writeChunk writeStateMachineData failed: blockId" + write.getBlockID() + " logIndex " + entryIndex + " chunkName " + write.getChunkData().getChunkName() + " Error message: " + r.getMessage() + " Container Result: " + r.getResult()); @@ -601,7 +616,7 @@ private CompletableFuture writeStateMachineData( metrics.incNumBytesWrittenCount( requestProto.getWriteChunk().getChunkData().getLen()); if (LOG.isDebugEnabled()) { - LOG.debug(gid + + LOG.debug(getGroupId() + ": writeChunk writeStateMachineData completed: blockId" + write.getBlockID() + " logIndex " + entryIndex + " chunkName " + write.getChunkData().getChunkName()); @@ -622,7 +637,7 @@ private StateMachine.DataChannel getStreamDataChannel( DispatcherContext context) throws StorageContainerException { if (LOG.isDebugEnabled()) { LOG.debug("{}: getStreamDataChannel {} containerID={} pipelineID={} " + - "traceID={}", gid, requestProto.getCmdType(), + "traceID={}", getGroupId(), requestProto.getCmdType(), requestProto.getContainerID(), requestProto.getPipelineID(), requestProto.getTraceID()); } @@ -704,9 +719,10 @@ private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { return chunkExecutors.get(i); } - /* - * writeStateMachineData calls are not synchronized with each other - * and also with applyTransaction. + /** + * {@link #writeStateMachineData} + * calls are not synchronized with each other + * and also with {@code applyTransaction(TransactionContext)}. */ @Override public CompletableFuture write(LogEntryProto entry, TransactionContext trx) { @@ -780,7 +796,7 @@ private ByteString readStateMachineData( new StorageContainerException(response.getMessage(), response.getResult()); LOG.error("gid {} : ReadStateMachine failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, response.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), response.getCmdType(), index, response.getMessage(), response.getResult()); stateMachineHealthy.set(false); throw sce; @@ -816,15 +832,13 @@ private ByteString readStateMachineData( */ @Override public CompletableFuture flush(long index) { - List> futureList = - writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) - .map(Map.Entry::getValue).collect(Collectors.toList()); return CompletableFuture.allOf( - futureList.toArray(new CompletableFuture[futureList.size()])); + writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) + .map(Map.Entry::getValue).toArray(CompletableFuture[]::new)); } /** - * This method is used by the Leader to read state machine date for sending appendEntries to followers. + * This method is used by the Leader to read state machine data for sending appendEntries to followers. 
* It will first get the data from {@link #stateMachineDataCache}. * If the data is not in the cache, it will read from the file by dispatching a command * @@ -857,7 +871,7 @@ public CompletableFuture read(LogEntryProto entry, TransactionContex .map(TransactionContext::getStateMachineContext) .orElse(null); final ContainerCommandRequestProto requestProto = context != null ? context.getLogProto() - : getContainerCommandRequestProto(gid, entry.getStateMachineLogEntry().getLogData()); + : getContainerCommandRequestProto(getGroupId(), entry.getStateMachineLogEntry().getLogData()); if (requestProto.getCmdType() != Type.WriteChunk) { throw new IllegalStateException("Cmd type:" + requestProto.getCmdType() @@ -875,7 +889,7 @@ public CompletableFuture read(LogEntryProto entry, TransactionContex return future; } catch (Exception e) { metrics.incNumReadStateMachineFails(); - LOG.error("{} unable to read stateMachineData:", gid, e); + LOG.error("{} unable to read stateMachineData:", getGroupId(), e); return completeExceptionally(e); } } @@ -921,7 +935,7 @@ public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allS // from `HddsDatanodeService.stop()`, otherwise, it indicates this `close` originates from ratis. if (allServer) { if (datanodeService != null && !datanodeService.isStopped()) { - LOG.info("{} is closed by ratis", gid); + LOG.info("{} is closed by ratis", getGroupId()); if (semaphore.tryAcquire()) { // run with a different thread, so this raft group can be closed Runnable runnable = () -> { @@ -953,7 +967,7 @@ public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allS CompletableFuture.runAsync(runnable); } } else { - LOG.info("{} is closed by HddsDatanodeService", gid); + LOG.info("{} is closed by HddsDatanodeService", getGroupId()); } } } @@ -965,6 +979,11 @@ private CompletableFuture applyTransaction( final CheckedSupplier task = () -> { try { + try { + this.validatePeers(); + } catch (StorageContainerException e) { + return ContainerUtils.logAndReturnError(LOG, e, request); + } long timeNow = Time.monotonicNowNanos(); long queueingDelay = timeNow - context.getStartTime(); metrics.recordQueueingDelay(request.getCmdType(), queueingDelay); @@ -984,14 +1003,17 @@ private CompletableFuture applyTransaction( private void removeStateMachineDataIfNeeded(long index) { if (waitOnBothFollowers) { try { - RaftServer.Division division = ratisServer.getServer().getDivision(gid); + RaftServer.Division division = ratisServer.getServer().getDivision(getGroupId()); if (division.getInfo().isLeader()) { - long minIndex = Arrays.stream(division.getInfo() - .getFollowerNextIndices()).min().getAsLong(); - LOG.debug("Removing data corresponding to log index {} min index {} " - + "from cache", index, minIndex); - removeCacheDataUpTo(Math.min(minIndex, index)); + Arrays.stream(division.getInfo() + .getFollowerNextIndices()).min().ifPresent(minIndex -> { + removeCacheDataUpTo(Math.min(minIndex, index)); + LOG.debug("Removing data corresponding to log index {} min index {} " + + "from cache", index, minIndex); + }); } + } catch (RuntimeException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } @@ -1042,7 +1064,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { CompletableFuture applyTransactionFuture = new CompletableFuture<>(); final Consumer exceptionHandler = e -> { - LOG.error(gid + ": failed to applyTransaction at logIndex " + index + LOG.error(getGroupId() + ": failed to applyTransaction at logIndex " + index + " for " + 
requestProto.getCmdType(), e); stateMachineHealthy.compareAndSet(true, false); metrics.incNumApplyTransactionsFails(); @@ -1070,7 +1092,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { new StorageContainerException(r.getMessage(), r.getResult()); LOG.error( "gid {} : ApplyTransaction failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), r.getCmdType(), index, r.getMessage(), r.getResult()); metrics.incNumApplyTransactionsFails(); // Since the applyTransaction now is completed exceptionally, @@ -1079,12 +1101,12 @@ public CompletableFuture applyTransaction(TransactionContext trx) { // shutdown. applyTransactionFuture.completeExceptionally(sce); stateMachineHealthy.compareAndSet(true, false); - ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole()); + ratisServer.handleApplyTransactionFailure(getGroupId(), trx.getServerRole()); } else { if (LOG.isDebugEnabled()) { LOG.debug( "gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), r.getCmdType(), index, r.getMessage(), r.getResult()); } if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) { @@ -1161,26 +1183,26 @@ public void evictStateMachineCache() { } @Override - public void notifyFollowerSlowness(RoleInfoProto roleInfoProto) { - ratisServer.handleNodeSlowness(gid, roleInfoProto); + public void notifyFollowerSlowness(RoleInfoProto roleInfoProto, RaftPeer follower) { + ratisServer.handleFollowerSlowness(getGroupId(), roleInfoProto, follower); } @Override public void notifyExtendedNoLeader(RoleInfoProto roleInfoProto) { - ratisServer.handleNoLeader(gid, roleInfoProto); + ratisServer.handleNoLeader(getGroupId(), roleInfoProto); } @Override public void notifyLogFailed(Throwable t, LogEntryProto failedEntry) { - LOG.error("{}: {} {}", gid, TermIndex.valueOf(failedEntry), + LOG.error("{}: {} {}", getGroupId(), TermIndex.valueOf(failedEntry), toStateMachineLogEntryString(failedEntry.getStateMachineLogEntry()), t); - ratisServer.handleNodeLogFailure(gid, t); + ratisServer.handleNodeLogFailure(getGroupId(), t); } @Override public CompletableFuture notifyInstallSnapshotFromLeader( RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto, + ratisServer.handleInstallSnapshotFromLeader(getGroupId(), roleInfoProto, firstTermIndexInLog); final CompletableFuture future = new CompletableFuture<>(); future.complete(firstTermIndexInLog); @@ -1189,7 +1211,7 @@ public CompletableFuture notifyInstallSnapshotFromLeader( @Override public void notifyGroupRemove() { - ratisServer.notifyGroupRemove(gid); + ratisServer.notifyGroupRemove(getGroupId()); // Make best effort to quasi-close all the containers on group removal. // Containers already in terminal state like CLOSED or UNHEALTHY will not // be affected. @@ -1197,7 +1219,7 @@ public void notifyGroupRemove() { try { containerController.markContainerForClose(cid); containerController.quasiCloseContainer(cid, - "Ratis group removed"); + "Ratis group removed. 
Group id: " + getGroupId()); } catch (IOException e) { LOG.debug("Failed to quasi-close container {}", cid); } @@ -1219,7 +1241,7 @@ public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, @Override public String toStateMachineLogEntryString(StateMachineLogEntryProto proto) { - return smProtoToString(gid, containerController, proto); + return smProtoToString(getGroupId(), containerController, proto); } public static String smProtoToString(RaftGroupId gid, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 7899cdcc0e6..a4c14343985 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -104,6 +104,7 @@ import org.apache.ratis.server.RaftServerRpc; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.server.storage.RaftStorage; +import org.apache.ratis.util.Preconditions; import org.apache.ratis.util.SizeInBytes; import org.apache.ratis.util.TimeDuration; import org.apache.ratis.util.TraditionalBinaryPrefix; @@ -161,19 +162,18 @@ private static long nextCallId() { private int clientPort; private int dataStreamPort; private final RaftServer server; + private final String name; private final List chunkExecutors; private final ContainerDispatcher dispatcher; private final ContainerController containerController; private final ClientId clientId = ClientId.randomId(); private final StateContext context; - private final long nodeFailureTimeoutMs; private boolean isStarted = false; private final DatanodeDetails datanodeDetails; private final ConfigurationSource conf; // TODO: Remove the gids set when Ratis supports an api to query active // pipelines private final ConcurrentMap activePipelines = new ConcurrentHashMap<>(); - private final RaftPeerId raftPeerId; // Timeout used while calling submitRequest directly. private final long requestTimeout; private final boolean shouldDeleteRatisLogDirectory; @@ -197,14 +197,14 @@ private XceiverServerRatis(HddsDatanodeService hddsDatanodeService, DatanodeDeta this.context = context; this.dispatcher = dispatcher; this.containerController = containerController; - this.raftPeerId = RatisHelper.toRaftPeerId(dd); String threadNamePrefix = datanodeDetails.threadNamePrefix(); chunkExecutors = createChunkExecutors(conf, threadNamePrefix); - nodeFailureTimeoutMs = ratisServerConfig.getFollowerSlownessTimeout(); shouldDeleteRatisLogDirectory = ratisServerConfig.shouldDeleteRatisLogDirectory(); RaftProperties serverProperties = newRaftProperties(); + final RaftPeerId raftPeerId = RatisHelper.toRaftPeerId(dd); + this.name = getClass().getSimpleName() + "(" + raftPeerId + ")"; this.server = RaftServer.newBuilder().setServerId(raftPeerId) .setProperties(serverProperties) @@ -474,7 +474,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { // NOTE : the default value for the retry count in ratis is -1, // which means retry indefinitely. 
- int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs / + final int syncTimeoutRetryDefault = (int) ratisServerConfig.getFollowerSlownessTimeout() / dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS); int numSyncRetries = conf.getInt( OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, @@ -558,7 +558,7 @@ private static Parameters createTlsParameters(SecurityConfig conf, @Override public void start() throws IOException { if (!isStarted) { - LOG.info("Starting {} {}", getClass().getSimpleName(), server.getId()); + LOG.info("Starting {}", name); for (ThreadPoolExecutor executor : chunkExecutors) { executor.prestartAllCoreThreads(); } @@ -581,11 +581,11 @@ public void start() throws IOException { } } - private int getRealPort(InetSocketAddress address, Port.Name name) { + private int getRealPort(InetSocketAddress address, Port.Name portName) { int realPort = address.getPort(); - datanodeDetails.setPort(DatanodeDetails.newPort(name, realPort)); - LOG.info("{} {} is started using port {} for {}", - getClass().getSimpleName(), server.getId(), realPort, name); + final Port port = DatanodeDetails.newPort(portName, realPort); + datanodeDetails.setPort(port); + LOG.info("{} is started using port {}", name, port); return realPort; } @@ -593,7 +593,7 @@ private int getRealPort(InetSocketAddress address, Port.Name name) { public void stop() { if (isStarted) { try { - LOG.info("Stopping {} {}", getClass().getSimpleName(), server.getId()); + LOG.info("Closing {}", name); // shutdown server before the executors as while shutting down, // some of the tasks would be executed using the executors. server.close(); @@ -602,7 +602,7 @@ public void stop() { } isStarted = false; } catch (IOException e) { - LOG.error("XceiverServerRatis Could not be stopped gracefully.", e); + LOG.error("Failed to close {}.", name, e); } } } @@ -706,45 +706,40 @@ private GroupInfoRequest createGroupInfoRequest( nextCallId()); } - private void handlePipelineFailure(RaftGroupId groupId, - RoleInfoProto roleInfoProto) { - String msg; - UUID datanode = RatisHelper.toDatanodeId(roleInfoProto.getSelf()); - RaftPeerId id = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()); + private void handlePipelineFailure(RaftGroupId groupId, RoleInfoProto roleInfoProto, String reason) { + final RaftPeerId raftPeerId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()); + Preconditions.assertEquals(getServer().getId(), raftPeerId, "raftPeerId"); + final StringBuilder b = new StringBuilder() + .append(name).append(" with datanodeId ").append(RatisHelper.toDatanodeId(raftPeerId)) + .append("handlePipelineFailure ").append(" for ").append(reason) + .append(": ").append(roleInfoProto.getRole()) + .append(" elapsed time=").append(roleInfoProto.getRoleElapsedTimeMs()).append("ms"); + switch (roleInfoProto.getRole()) { case CANDIDATE: - msg = datanode + " is in candidate state for " + - roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms"; + final long lastLeaderElapsedTime = roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs(); + b.append(", lastLeaderElapsedTime=").append(lastLeaderElapsedTime).append("ms"); break; case FOLLOWER: - msg = datanode + " closes pipeline when installSnapshot from leader " + - "because leader snapshot doesn't contain any data to replay, " + - "all the log entries prior to the snapshot might have been purged." + - "So follower should not try to install snapshot from leader but" + - "can close the pipeline here. 
It's in follower state for " + - roleInfoProto.getRoleElapsedTimeMs() + "ms"; + b.append(", outstandingOp=").append(roleInfoProto.getFollowerInfo().getOutstandingOp()); break; case LEADER: - StringBuilder sb = new StringBuilder(); - sb.append(datanode).append(" has not seen follower/s"); - for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo() - .getFollowerInfoList()) { - if (follower.getLastRpcElapsedTimeMs() > nodeFailureTimeoutMs) { - sb.append(" ").append(RatisHelper.toDatanodeId(follower.getId())) - .append(" for ").append(follower.getLastRpcElapsedTimeMs()) - .append("ms"); - } + final long followerSlownessTimeoutMs = ratisServerConfig.getFollowerSlownessTimeout(); + for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo().getFollowerInfoList()) { + final long lastRpcElapsedTimeMs = follower.getLastRpcElapsedTimeMs(); + final boolean slow = lastRpcElapsedTimeMs > followerSlownessTimeoutMs; + final RaftPeerId followerId = RaftPeerId.valueOf(follower.getId().getId()); + b.append("\n Follower ").append(followerId) + .append(" with datanodeId ").append(RatisHelper.toDatanodeId(followerId)) + .append(" is ").append(slow ? "slow" : " responding") + .append(" with lastRpcElapsedTime=").append(lastRpcElapsedTimeMs).append("ms"); } - msg = sb.toString(); break; default: - LOG.error("unknown state: {}", roleInfoProto.getRole()); - throw new IllegalStateException("node" + id + " is in illegal role " - + roleInfoProto.getRole()); + throw new IllegalStateException("Unexpected role " + roleInfoProto.getRole()); } - triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.PIPELINE_FAILED); + triggerPipelineClose(groupId, b.toString(), ClosePipelineInfo.Reason.PIPELINE_FAILED); } private void triggerPipelineClose(RaftGroupId groupId, String detail, @@ -869,12 +864,12 @@ public void removeGroup(HddsProtos.PipelineID pipelineId) processReply(reply); } - void handleNodeSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto); + void handleFollowerSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto, RaftPeer follower) { + handlePipelineFailure(groupId, roleInfoProto, "slow follower " + follower.getId()); } void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto); + handlePipelineFailure(groupId, roleInfoProto, "no leader"); } void handleApplyTransactionFailure(RaftGroupId groupId, @@ -901,10 +896,9 @@ void handleApplyTransactionFailure(RaftGroupId groupId, void handleInstallSnapshotFromLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - LOG.warn("Install snapshot notification received from Leader with " + - "termIndex: {}, terminating pipeline: {}", + LOG.warn("handleInstallSnapshotFromLeader for firstTermIndexInLog={}, terminating pipeline: {}", firstTermIndexInLog, groupId); - handlePipelineFailure(groupId, roleInfoProto); + handlePipelineFailure(groupId, roleInfoProto, "install snapshot notification"); } /** @@ -950,7 +944,7 @@ void handleLeaderChangedNotification(RaftGroupMemberId groupMemberId, LOG.info("Leader change notification received for group: {} with new " + "leaderId: {}", groupMemberId.getGroupId(), raftPeerId1); // Save the reported leader to be sent with the report to SCM - boolean leaderForGroup = this.raftPeerId.equals(raftPeerId1); + final boolean leaderForGroup = server.getId().equals(raftPeerId1); activePipelines.compute(groupMemberId.getGroupId(), (key, value) -> value == null ? 
new ActivePipelineContext(leaderForGroup, false) : new ActivePipelineContext(leaderForGroup, value.isPendingClose())); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index b22b9148bb1..5fced0e39b3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -29,6 +29,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -36,6 +37,7 @@ import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; import org.apache.hadoop.ozone.container.common.utils.RawDB; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.SchemaV3; import org.apache.hadoop.util.Time; @@ -44,6 +46,7 @@ import jakarta.annotation.Nullable; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.initPerDiskDBStore; @@ -80,6 +83,8 @@ public class HddsVolume extends StorageVolume { private final VolumeIOStats volumeIOStats; private final VolumeInfoMetrics volumeInfoMetrics; + private ContainerController controller; + private final AtomicLong committedBytes = new AtomicLong(); // till Open containers become full // Mentions the type of volume @@ -119,8 +124,10 @@ private HddsVolume(Builder b) throws IOException { if (!b.getFailedVolume() && getVolumeInfo().isPresent()) { this.setState(VolumeState.NOT_INITIALIZED); + ConfigurationSource conf = getConf(); + int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); this.volumeIOStats = new VolumeIOStats(b.getVolumeRootStr(), - this.getStorageDir().toString()); + this.getStorageDir().toString(), intervals); this.volumeInfoMetrics = new VolumeInfoMetrics(b.getVolumeRootStr(), this); @@ -199,7 +206,7 @@ public void shutdown() { /** * Delete all files under - * /hdds//tmp/deleted-containers. + * volume/hdds/cluster-id/tmp/deleted-containers. * This is the directory where containers are moved when they are deleted * from the system, but before being removed from the filesystem. This * makes the deletion atomic. @@ -382,6 +389,17 @@ public void loadDbStore(boolean readOnly) throws IOException { getStorageID()); } + public void setController(ContainerController controller) { + this.controller = controller; + } + + public long getContainers() { + if (controller != null) { + return controller.getContainerCount(this); + } + return 0; + } + /** * Pick a DbVolume for HddsVolume and init db instance. * Use the HddsVolume directly if no DbVolume found. 
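The new HddsVolume#setController / HddsVolume#getContainers pair above only reports a live per-volume container count once something hands the volume a ContainerController; the caller is not part of the hunks shown here. A minimal wiring sketch (illustrative only: the class name VolumeControllerWiring and method attach are hypothetical, and it assumes MutableVolumeSet#getVolumesList() returns the managed volumes) might look like this:

import java.util.List;

import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;

/** Illustrative sketch: attach a ContainerController to every HddsVolume in a volume set. */
final class VolumeControllerWiring {
  private VolumeControllerWiring() { }

  static void attach(MutableVolumeSet volumeSet, ContainerController controller) {
    // Assumption: getVolumesList() exposes the StorageVolume instances managed by the set.
    List<StorageVolume> volumes = volumeSet.getVolumesList();
    for (StorageVolume v : volumes) {
      if (v instanceof HddsVolume) {
        // After this call, HddsVolume#getContainers() delegates to
        // controller.getContainerCount(volume) instead of returning 0,
        // which is what the new VolumeInfoMetrics gauge further below reads.
        ((HddsVolume) v).setController(controller);
      }
    }
  }
}

Until such wiring runs, getContainers() deliberately falls back to 0 (controller still null), so the metric can be registered before the ContainerController exists.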
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index e195b127d49..9afea8e6b0c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -44,6 +44,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import org.apache.ratis.util.function.CheckedRunnable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,7 +85,7 @@ public class MutableVolumeSet implements VolumeSet { private String clusterID; private final StorageVolumeChecker volumeChecker; - private Runnable failedVolumeListener; + private CheckedRunnable failedVolumeListener; private StateContext context; private final StorageVolumeFactory volumeFactory; private final StorageVolume.VolumeType volumeType; @@ -132,7 +133,7 @@ public MutableVolumeSet(String dnUuid, String clusterID, initializeVolumeSet(); } - public void setFailedVolumeListener(Runnable runnable) { + public void setFailedVolumeListener(CheckedRunnable runnable) { failedVolumeListener = runnable; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java index e22addd354f..2ce19c3bf19 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java @@ -21,7 +21,10 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableQuantiles; +import org.apache.hadoop.metrics2.lib.MutableRate; /** * This class is used to track Volume IO stats for each HDDS Volume. @@ -29,12 +32,23 @@ public class VolumeIOStats { private String metricsSourceName = VolumeIOStats.class.getSimpleName(); private String storageDirectory; - private @Metric MutableCounterLong readBytes; - private @Metric MutableCounterLong readOpCount; - private @Metric MutableCounterLong writeBytes; - private @Metric MutableCounterLong writeOpCount; - private @Metric MutableCounterLong readTime; - private @Metric MutableCounterLong writeTime; + private final MetricsRegistry registry = new MetricsRegistry("VolumeIOStats"); + @Metric + private MutableCounterLong readBytes; + @Metric + private MutableCounterLong readOpCount; + @Metric + private MutableCounterLong writeBytes; + @Metric + private MutableCounterLong writeOpCount; + @Metric + private MutableRate readTime; + @Metric + private MutableQuantiles[] readLatencyQuantiles; + @Metric + private MutableRate writeTime; + @Metric + private MutableQuantiles[] writeLatencyQuantiles; @Deprecated public VolumeIOStats() { @@ -44,9 +58,24 @@ public VolumeIOStats() { /** * @param identifier Typically, path to volume root. e.g. 
/data/hdds */ - public VolumeIOStats(String identifier, String storageDirectory) { + public VolumeIOStats(String identifier, String storageDirectory, int[] intervals) { this.metricsSourceName += '-' + identifier; this.storageDirectory = storageDirectory; + + // Try initializing `readLatencyQuantiles` and `writeLatencyQuantiles` + if (intervals != null && intervals.length > 0) { + final int length = intervals.length; + readLatencyQuantiles = new MutableQuantiles[intervals.length]; + writeLatencyQuantiles = new MutableQuantiles[intervals.length]; + for (int i = 0; i < length; i++) { + readLatencyQuantiles[i] = registry.newQuantiles( + "readLatency" + intervals[i] + "s", + "Read Data File Io Latency in ms", "ops", "latency", intervals[i]); + writeLatencyQuantiles[i] = registry.newQuantiles( + "writeLatency" + intervals[i] + "s", + "Write Data File Io Latency in ms", "ops", "latency", intervals[i]); + } + } init(); } @@ -99,7 +128,10 @@ public void incWriteOpCount() { * @param time */ public void incReadTime(long time) { - readTime.incr(time); + readTime.add(time); + for (MutableQuantiles q : readLatencyQuantiles) { + q.add(time); + } } /** @@ -107,7 +139,10 @@ public void incReadTime(long time) { * @param time */ public void incWriteTime(long time) { - writeTime.incr(time); + writeTime.add(time); + for (MutableQuantiles q : writeLatencyQuantiles) { + q.add(time); + } } /** @@ -147,7 +182,7 @@ public long getWriteOpCount() { * @return long */ public long getReadTime() { - return readTime.value(); + return (long) readTime.lastStat().total(); } /** @@ -155,7 +190,7 @@ public long getReadTime() { * @return long */ public long getWriteTime() { - return writeTime.value(); + return (long) writeTime.lastStat().total(); } @Metric diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index af890269255..3d1be9791ec 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -46,16 +46,18 @@ * - fsCapacity: reported total capacity from local fs. * - minVolumeFreeSpace (mvfs) : determines the free space for closing containers.This is like adding a few reserved bytes to reserved space. - Dn's will send close container action to SCM at this limit & it is + Dn's will send close container action to SCM at this limit, and it is configurable. * - * + *
    + * {@code
      * |----used----|   (avail)   |++mvfs++|++++reserved+++++++|
      * |<-     capacity                  ->|
      *              |     fsAvail      |-------other-----------|
      * |<-                   fsCapacity                      ->|
    - *
    + * }
    + *
      * What we could directly get from local fs:
      *     fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
      * We could get from config:
    @@ -78,11 +80,13 @@
      * then we should use DedicatedDiskSpaceUsage for
      * `hdds.datanode.du.factory.classname`,
      * Then it is much simpler, since we don't care about other usage:
    - *
    + * {@code
      *  |----used----|             (avail)/fsAvail              |
      *  |<-              capacity/fsCapacity                  ->|
    + * }
      *
      *  We have avail == fsAvail.
    + *  
    */ public final class VolumeInfo { @@ -153,11 +157,14 @@ public long getCapacity() { } /** + *
    +   * {@code
        * Calculate available space use method A.
        * |----used----|   (avail)   |++++++++reserved++++++++|
        * |<-     capacity         ->|
    -   *
        * A) avail = capacity - used
    +   * }
    +   * 
    */ public long getAvailable() { return usage.getAvailable(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index 68140600db9..cd31b8063d3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -37,6 +37,7 @@ public class VolumeInfoMetrics { private final HddsVolume volume; @Metric("Returns the RocksDB compact times of the Volume") private MutableRate dbCompactLatency; + private long containers; /** * @param identifier Typically, path to volume root. E.g. /data/hdds @@ -153,4 +154,11 @@ public void dbCompactTimesNanoSecondsIncr(long time) { dbCompactLatency.add(time); } + /** + * Return the Container Count of the Volume. + */ + @Metric("Returns the Container Count of the Volume") + public long getContainers() { + return volume.getContainers(); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index 7e138b05716..34ba66c91bb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.volume; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -77,11 +78,15 @@ public long getUsedSpace() { } /** + *
    +   * {@code
        * Calculate available space use method B.
        * |----used----|   (avail)   |++++++++reserved++++++++|
        *              |     fsAvail      |-------other-------|
    -   *                          ->|~~~~|<-
    +   *                          ->|~~~~|<-
        *                      remainingReserved
    +   * }
    +   * 
    * B) avail = fsAvail - Max(reserved - other, 0); */ public SpaceUsageSource getCurrentUsage() { @@ -216,9 +221,8 @@ private static long getReserved(ConfigurationSource conf, String rootDir, for (String reserve : reserveList) { String[] words = reserve.split(":"); if (words.length < 2) { - LOG.error("Reserved space should be configured in a pair, but current value is {}", - reserve); - continue; + throw new ConfigurationException("hdds.datanode.dir.du.reserved - " + + "Reserved space should be configured in a pair, but current value is " + reserve); } try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java index 9dedd65565f..95b7d06167f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java @@ -18,26 +18,24 @@ package org.apache.hadoop.ozone.container.ec.reconstruction; import com.google.common.collect.ImmutableList; +import jakarta.annotation.Nonnull; import org.apache.commons.collections.map.SingletonMap; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; -import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; -import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,7 +44,6 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; /** * This class wraps necessary container-level rpc calls @@ -69,21 +66,17 @@ public ECContainerOperationClient(ConfigurationSource conf, } @Nonnull - private static XceiverClientManager createClientManager( - ConfigurationSource conf, CertificateClient certificateClient) + private static XceiverClientManager createClientManager(ConfigurationSource conf, CertificateClient certificateClient) throws IOException { ClientTrustManager trustManager = null; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - CACertificateProvider localCaCerts = - () -> HAUtils.buildCAX509List(certificateClient, conf); - CACertificateProvider remoteCacerts = - () -> HAUtils.buildCAX509List(null, conf); - trustManager = new ClientTrustManager(remoteCacerts, localCaCerts); 
+ trustManager = certificateClient.createClientTrustManager(); } - return new XceiverClientManager(conf, - new XceiverClientManager.XceiverClientManagerConfigBuilder() - .setMaxCacheSize(256).setStaleThresholdMs(10 * 1000).build(), - trustManager); + XceiverClientManager.ScmClientConfig scmClientConfig = new XceiverClientManager.XceiverClientManagerConfigBuilder() + .setMaxCacheSize(256) + .setStaleThresholdMs(10 * 1000) + .build(); + return new XceiverClientManager(conf, scmClientConfig, trustManager); } public BlockData[] listBlock(long containerId, DatanodeDetails dn, @@ -99,14 +92,11 @@ public BlockData[] listBlock(long containerId, DatanodeDetails dn, try { return BlockData.getFromProtoBuf(i); } catch (IOException e) { - LOG.debug("Failed while converting to protobuf BlockData. Returning" - + " null for listBlock from DN: " + dn, - e); + LOG.debug("Failed while converting to protobuf BlockData. Returning null for listBlock from DN: {}", dn, e); // TODO: revisit here. return null; } - }).collect(Collectors.toList()) - .toArray(new BlockData[blockDataList.size()]); + }).toArray(BlockData[]::new); } finally { this.xceiverClientManager.releaseClient(xceiverClient, false); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 7e64766b41c..f1e1d0d900b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.BlockInputStreamFactory; import org.apache.hadoop.ozone.client.io.BlockInputStreamFactoryImpl; import org.apache.hadoop.ozone.client.io.ECBlockInputStreamProxy; @@ -371,7 +370,7 @@ private void logBlockGroupDetails(BlockLocationInfo blockLocationInfo, .append(" block length: ") .append(data.getSize()) .append(" block group length: ") - .append(getBlockDataLength(data)) + .append(data.getBlockGroupLength()) .append(" chunk list: \n"); int cnt = 0; for (ContainerProtos.ChunkInfo chunkInfo : data.getChunks()) { @@ -573,7 +572,7 @@ private long calcEffectiveBlockGroupLen(BlockData[] blockGroup, continue; } - long putBlockLen = getBlockDataLength(blockGroup[i]); + long putBlockLen = blockGroup[i].getBlockGroupLength(); // Use safe length is the minimum of the lengths recorded across the // stripe blockGroupLen = Math.min(putBlockLen, blockGroupLen); @@ -581,16 +580,6 @@ private long calcEffectiveBlockGroupLen(BlockData[] blockGroup, return blockGroupLen == Long.MAX_VALUE ? 0 : blockGroupLen; } - private long getBlockDataLength(BlockData blockData) { - String lenStr = blockData.getMetadata() - .get(OzoneConsts.BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK); - // If we don't have the length, then it indicates a problem with the stripe. - // All replica should carry the length, so if it is not there, we return 0, - // which will cause us to set the length of the block to zero and not - // attempt to reconstruct it. - return (lenStr == null) ? 
0 : Long.parseLong(lenStr); - } - public ECReconstructionMetrics getECReconstructionMetrics() { return this.metrics; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java index 6d32f3a3f3e..a50a125f6d4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java @@ -46,6 +46,16 @@ public ECReconstructionCoordinatorTask( debugString = reconstructionCommandInfo.toString(); } + @Override + public String getMetricName() { + return "ECReconstructions"; + } + + @Override + public String getMetricDescriptionSegment() { + return "EC reconstructions"; + } + @Override public void runTask() { // Implement the coordinator logic to handle a container group diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index cea6737c7c9..b4ff62e52d2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -935,7 +935,6 @@ private ContainerReplicaProto.State getHddsState() /** * Returns container DB file. - * @return */ public File getContainerDBFile() { return KeyValueContainerLocationUtil.getContainerDBFile(containerData); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index ccc24dad0f9..708038bd13f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -431,7 +431,6 @@ public KeyPrefixFilter getDeletingBlockKeyFilter() { /** * Schema v3 use a prefix as startKey, * for other schemas just return null. - * @return */ public String startKeyEmpty() { if (hasSchema(SCHEMA_V3)) { @@ -443,7 +442,6 @@ public String startKeyEmpty() { /** * Schema v3 use containerID as key prefix, * for other schemas just return null. 
- * @return */ public String containerPrefix() { if (hasSchema(SCHEMA_V3)) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index d1028727648..06987f63561 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -103,6 +103,8 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; + +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CLOSED_CONTAINER_IO; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_INTERNAL_ERROR; @@ -110,6 +112,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DELETE_ON_NON_EMPTY_CONTAINER; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.GET_SMALL_FILE_ERROR; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_ARGUMENT; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.PUT_SMALL_FILE_ERROR; @@ -132,11 +135,10 @@ import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.putBlockResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto.State.RECOVERING; + import org.apache.hadoop.ozone.container.common.interfaces.ScanResult; -import static org.apache.hadoop.ozone.ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST; +import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; import org.apache.hadoop.util.Time; import org.apache.ratis.statemachine.StateMachine; @@ -259,6 +261,15 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) { Type cmdType = request.getCmdType(); + // Validate the request has been made to the correct datanode with the node id matching. 
+ if (kvContainer != null) { + try { + handler.validateRequestDatanodeId(kvContainer.getContainerData().getReplicaIndex(), + request.getDatanodeUuid()); + } catch (StorageContainerException e) { + return ContainerUtils.logAndReturnError(LOG, e, request); + } + } switch (cmdType) { case CreateContainer: @@ -377,7 +388,23 @@ ContainerCommandResponseProto handleCreateContainer( " already exists", null, CONTAINER_ALREADY_EXISTS), request); } + try { + this.validateRequestDatanodeId(request.getCreateContainer().hasReplicaIndex() ? + request.getCreateContainer().getReplicaIndex() : null, request.getDatanodeUuid()); + } catch (StorageContainerException e) { + return ContainerUtils.logAndReturnError(LOG, e, request); + } + long containerID = request.getContainerID(); + State containerState = request.getCreateContainer().getState(); + + if (containerState != RECOVERING) { + try { + containerSet.ensureContainerNotMissing(containerID, containerState); + } catch (StorageContainerException ex) { + return ContainerUtils.logAndReturnError(LOG, ex, request); + } + } ContainerLayoutVersion layoutVersion = ContainerLayoutVersion.getConfiguredVersion(conf); @@ -402,7 +429,11 @@ ContainerCommandResponseProto handleCreateContainer( try { if (containerSet.getContainer(containerID) == null) { newContainer.create(volumeSet, volumeChoosingPolicy, clusterId); - created = containerSet.addContainer(newContainer); + if (RECOVERING == newContainer.getContainerState()) { + created = containerSet.addContainerByOverwriteMissingContainer(newContainer); + } else { + created = containerSet.addContainer(newContainer); + } } else { // The create container request for an already existing container can // arrive in case the ContainerStateMachine reapplies the transaction @@ -595,14 +626,20 @@ ContainerCommandResponseProto handlePutBlock( boolean endOfBlock = false; if (!request.getPutBlock().hasEof() || request.getPutBlock().getEof()) { - // in EC, we will be doing empty put block. - // So, let's flush only when there are any chunks - if (!request.getPutBlock().getBlockData().getChunksList().isEmpty()) { + // There are two cases where client sends empty put block with eof. + // (1) An EC empty file. In this case, the block/chunk file does not exist, + // so no need to flush/close the file. + // (2) Ratis output stream in incremental chunk list mode may send empty put block + // to close the block, in which case we need to flush/close the file. + if (!request.getPutBlock().getBlockData().getChunksList().isEmpty() || + blockData.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST)) { chunkManager.finishWriteChunks(kvContainer, blockData); } endOfBlock = true; } + // Note: checksum held inside blockData. But no extra checksum validation here with handlePutBlock. + long bcsId = dispatcherContext == null ? 0 : dispatcherContext.getLogIndex(); blockData.setBlockCommitSequenceId(bcsId); @@ -718,15 +755,6 @@ ContainerCommandResponseProto handleGetContainerChecksumInfo( return getGetContainerMerkleTreeResponse(request, checksumTree); } - /** - * Checks if a replicaIndex needs to be checked based on the client version for a request. - * @param request ContainerCommandRequest object. - * @return true if the validation is required for the client version else false. - */ - private boolean replicaIndexCheckRequired(ContainerCommandRequestProto request) { - return request.hasVersion() && request.getVersion() >= EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue(); - } - /** * Handle Get Block operation. 
Calls BlockManager to process the request. */ @@ -745,9 +773,7 @@ ContainerCommandResponseProto handleGetBlock( try { BlockID blockID = BlockID.getFromProtobuf( request.getGetBlock().getBlockID()); - if (replicaIndexCheckRequired(request)) { - BlockUtils.verifyReplicaIdx(kvContainer, blockID); - } + BlockUtils.verifyReplicaIdx(kvContainer, blockID); responseData = blockManager.getBlock(kvContainer, blockID).getProtoBufMessage(); final long numBytes = responseData.getSerializedSize(); metrics.incContainerBytesStats(Type.GetBlock, numBytes); @@ -870,9 +896,7 @@ ContainerCommandResponseProto handleReadChunk( ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk() .getChunkData()); Preconditions.checkNotNull(chunkInfo); - if (replicaIndexCheckRequired(request)) { - BlockUtils.verifyReplicaIdx(kvContainer, blockID); - } + BlockUtils.verifyReplicaIdx(kvContainer, blockID); BlockUtils.verifyBCSId(kvContainer, blockID); if (dispatcherContext == null) { @@ -972,6 +996,7 @@ ContainerCommandResponseProto handleWriteChunk( if (isWrite) { data = ChunkBuffer.wrap(writeChunk.getData().asReadOnlyByteBufferList()); + // TODO: Can improve checksum validation here. Make this one-shot after protocol change. validateChunkChecksumData(data, chunkInfo); } chunkManager @@ -992,6 +1017,9 @@ ContainerCommandResponseProto handleWriteChunk( // of order. blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex()); boolean eob = writeChunk.getBlock().getEof(); + if (eob) { + chunkManager.finishWriteChunks(kvContainer, blockData); + } blockManager.putBlock(kvContainer, blockData, eob); blockDataProto = blockData.getProtoBufMessage(); final long numBytes = blockDataProto.getSerializedSize(); @@ -1217,7 +1245,7 @@ private void checkContainerOpen(KeyValueContainer kvContainer) * might already be in closing state here. */ if (containerState == State.OPEN || containerState == State.CLOSING - || containerState == State.RECOVERING) { + || containerState == RECOVERING) { return; } @@ -1698,4 +1726,22 @@ public static FaultInjector getInjector() { public static void setInjector(FaultInjector instance) { injector = instance; } + + /** + * Verify if request's replicaIndex matches with containerData. This validates only for EC containers i.e. + * containerReplicaIdx should be > 0. + * + * @param containerReplicaIdx replicaIndex for the container command. + * @param requestDatanodeUUID requested block info + * @throws StorageContainerException if replicaIndex mismatches. 
+ */ + private boolean validateRequestDatanodeId(Integer containerReplicaIdx, String requestDatanodeUUID) + throws StorageContainerException { + if (containerReplicaIdx != null && containerReplicaIdx > 0 && !requestDatanodeUUID.equals(this.getDatanodeId())) { + throw new StorageContainerException( + String.format("Request is trying to write to node with uuid : %s but the current nodeId is: %s .", + requestDatanodeUUID, this.getDatanodeId()), INVALID_ARGUMENT); + } + return true; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 7773b54f794..8bbc2478004 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -99,7 +99,6 @@ public static DatanodeStore getUncachedDatanodeStore( * opened by this thread, the other thread will get a RocksDB exception. * @param containerData The container data * @param conf Configuration - * @return * @throws IOException */ public static DatanodeStore getUncachedDatanodeStore( @@ -248,7 +247,9 @@ public static void verifyBCSId(Container container, BlockID blockID) public static void verifyReplicaIdx(Container container, BlockID blockID) throws IOException { Integer containerReplicaIndex = container.getContainerData().getReplicaIndex(); - if (containerReplicaIndex > 0 && !containerReplicaIndex.equals(blockID.getReplicaIndex())) { + Integer blockReplicaIndex = blockID.getReplicaIndex(); + if (containerReplicaIndex > 0 && blockReplicaIndex != null && blockReplicaIndex != 0 && + !containerReplicaIndex.equals(blockReplicaIndex)) { throw new StorageContainerException( "Unable to find the Container with replicaIdx " + blockID.getReplicaIndex() + ". 
Container " + container.getContainerData().getContainerID() + " replicaIdx is " diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java index 0fac45571c7..dc048ac16aa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java @@ -39,6 +39,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.function.ToLongFunction; @@ -50,6 +51,7 @@ import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; @@ -200,11 +202,12 @@ private static long writeDataToChannel(FileChannel channel, ChunkBuffer data, } } + @SuppressWarnings("checkstyle:parameternumber") public static ChunkBuffer readData(long len, int bufferCapacity, - File file, long off, HddsVolume volume, int readMappedBufferThreshold) - throws StorageContainerException { - if (len > readMappedBufferThreshold) { - return readData(file, bufferCapacity, off, len, volume); + File file, long off, HddsVolume volume, int readMappedBufferThreshold, boolean mmapEnabled, + MappedBufferManager mappedBufferManager) throws StorageContainerException { + if (mmapEnabled && len > readMappedBufferThreshold && bufferCapacity > readMappedBufferThreshold) { + return readData(file, bufferCapacity, off, len, volume, mappedBufferManager); } else if (len == 0) { return ChunkBuffer.wrap(Collections.emptyList()); } @@ -256,25 +259,52 @@ private static void readData(File file, long offset, long len, * @return a list of {@link MappedByteBuffer} containing the data. 
*/ private static ChunkBuffer readData(File file, int chunkSize, - long offset, long length, HddsVolume volume) + long offset, long length, HddsVolume volume, MappedBufferManager mappedBufferManager) throws StorageContainerException { - final List buffers = new ArrayList<>( - Math.toIntExact((length - 1) / chunkSize) + 1); - readData(file, offset, length, channel -> { - long readLen = 0; - while (readLen < length) { - final int n = Math.toIntExact(Math.min(length - readLen, chunkSize)); - final ByteBuffer mapped = channel.map( - FileChannel.MapMode.READ_ONLY, offset + readLen, n); - LOG.debug("mapped: offset={}, readLen={}, n={}, {}", - offset, readLen, n, mapped.getClass()); - readLen += mapped.remaining(); - buffers.add(mapped); + final int bufferNum = Math.toIntExact((length - 1) / chunkSize) + 1; + if (!mappedBufferManager.getQuota(bufferNum)) { + // proceed with normal buffer + final ByteBuffer[] buffers = BufferUtils.assignByteBuffers(length, + chunkSize); + readData(file, offset, length, c -> c.position(offset).read(buffers), volume); + Arrays.stream(buffers).forEach(ByteBuffer::flip); + return ChunkBuffer.wrap(Arrays.asList(buffers)); + } else { + try { + // proceed with mapped buffer + final List buffers = new ArrayList<>(bufferNum); + readData(file, offset, length, channel -> { + long readLen = 0; + while (readLen < length) { + final int n = Math.toIntExact(Math.min(length - readLen, chunkSize)); + final long finalOffset = offset + readLen; + final AtomicReference exception = new AtomicReference<>(); + ByteBuffer mapped = mappedBufferManager.computeIfAbsent(file.getAbsolutePath(), finalOffset, n, + () -> { + try { + return channel.map(FileChannel.MapMode.READ_ONLY, finalOffset, n); + } catch (IOException e) { + LOG.error("Failed to map file {} with offset {} and length {}", file, finalOffset, n); + exception.set(e); + return null; + } + }); + if (mapped == null) { + throw exception.get(); + } + LOG.debug("mapped: offset={}, readLen={}, n={}, {}", finalOffset, readLen, n, mapped.getClass()); + readLen += mapped.remaining(); + buffers.add(mapped); + } + return readLen; + }, volume); + return ChunkBuffer.wrap(buffers); + } catch (Throwable e) { + mappedBufferManager.releaseQuota(bufferNum); + throw e; } - return readLen; - }, volume); - return ChunkBuffer.wrap(buffers); + } } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index b287d9ac133..dd719a81fb3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -439,13 +439,13 @@ public static boolean isSameSchemaVersion(String schema, String other) { /** * Moves container directory to a new location - * under "/hdds//tmp/deleted-containers" + * under "volume/hdds/cluster-id/tmp/deleted-containers" * and updates metadata and chunks path. * Containers will be moved under it before getting deleted * to avoid, in case of failure, having artifact leftovers * on the default container path on the disk. * - * Delete operation for Schema < V3 + * Delete operation for Schema < V3 * 1. Container is marked DELETED * 2. Container is removed from memory container set * 3. 
Container DB handler from cache is removed and closed @@ -460,7 +460,6 @@ public static boolean isSameSchemaVersion(String schema, String other) { * 5. Container is deleted from tmp directory. * * @param keyValueContainerData - * @return true if renaming was successful */ public static void moveToDeletedContainerDir( KeyValueContainerData keyValueContainerData, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 7b3852011d3..6232b843567 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -64,6 +64,7 @@ public class BlockManagerImpl implements BlockManager { // Default Read Buffer capacity when Checksum is not present private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; + private final int readMappedBufferMaxCount; /** * Constructs a Block Manager. @@ -79,6 +80,9 @@ public BlockManagerImpl(ConfigurationSource conf) { this.readMappedBufferThreshold = config.getBufferSize( ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_KEY, ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT); + this.readMappedBufferMaxCount = config.getInt( + ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_KEY, + ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT); } @Override @@ -304,6 +308,11 @@ public int getReadMappedBufferThreshold() { return readMappedBufferThreshold; } + /** @return the max count of memory mapped buffers for read. */ + public int getReadMappedBufferMaxCount() { + return readMappedBufferMaxCount; + } + /** * Deletes an existing block. * As Deletion is handled by BlockDeletingService, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index 288a2d3e331..aa5d52f3cee 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -46,7 +46,6 @@ private ChunkManagerFactory() { * @param conf Configuration * @param manager This parameter will be used only for read data of * FILE_PER_CHUNK layout file. Can be null for other cases. 
- * @return */ public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index a87b184ccec..4ca578d7717 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -75,6 +75,8 @@ public class FilePerBlockStrategy implements ChunkManager { private final OpenFiles files = new OpenFiles(); private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; + private final int readMappedBufferMaxCount; + private final MappedBufferManager mappedBufferManager; private final VolumeSet volumeSet; public FilePerBlockStrategy(boolean sync, BlockManager manager, @@ -84,7 +86,15 @@ public FilePerBlockStrategy(boolean sync, BlockManager manager, manager.getDefaultReadBufferCapacity(); this.readMappedBufferThreshold = manager == null ? 0 : manager.getReadMappedBufferThreshold(); + this.readMappedBufferMaxCount = manager == null ? 0 + : manager.getReadMappedBufferMaxCount(); + LOG.info("ozone.chunk.read.mapped.buffer.max.count is loaded with {}", readMappedBufferMaxCount); this.volumeSet = volSet; + if (this.readMappedBufferMaxCount > 0) { + mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount); + } else { + mappedBufferManager = null; + } } private static void checkLayoutVersion(Container container) { @@ -192,10 +202,10 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, final long len = info.getLen(); long offset = info.getOffset(); - int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, + int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, defaultReadBufferCapacity); return ChunkUtils.readData(len, bufferCapacity, chunkFile, offset, volume, - readMappedBufferThreshold); + readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java index a649f573bf0..6ac88cad7f5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java @@ -67,6 +67,8 @@ public class FilePerChunkStrategy implements ChunkManager { private final BlockManager blockManager; private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; + private final int readMappedBufferMaxCount; + private final MappedBufferManager mappedBufferManager; private final VolumeSet volumeSet; public FilePerChunkStrategy(boolean sync, BlockManager manager, @@ -77,7 +79,15 @@ public FilePerChunkStrategy(boolean sync, BlockManager manager, manager.getDefaultReadBufferCapacity(); this.readMappedBufferThreshold = manager == null ? 0 : manager.getReadMappedBufferThreshold(); + this.readMappedBufferMaxCount = manager == null ?
0 + : manager.getReadMappedBufferMaxCount(); + LOG.info("ozone.chunk.read.mapped.buffer.max.count is loaded with {}", readMappedBufferMaxCount); this.volumeSet = volSet; + if (this.readMappedBufferMaxCount > 0) { + mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount); + } else { + mappedBufferManager = null; + } } private static void checkLayoutVersion(Container container) { @@ -265,7 +275,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, long offset = info.getOffset() - chunkFileOffset; Preconditions.checkState(offset >= 0); return ChunkUtils.readData(len, bufferCapacity, file, offset, volume, - readMappedBufferThreshold); + readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); } } catch (StorageContainerException ex) { //UNABLE TO FIND chunk is not a problem as we will try with the diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java new file mode 100644 index 00000000000..be2751925c7 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.keyvalue.impl; + +import com.google.common.util.concurrent.Striped; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.ref.WeakReference; +import java.nio.ByteBuffer; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.function.Supplier; +
+/** + * Manages memory-mapped buffers, keeping their total number under a predefined count and reusing already-mapped buffers where possible.
+ */ +public class MappedBufferManager { + + private static ConcurrentHashMap> mappedBuffers = + new ConcurrentHashMap>(); + private static final Logger LOG = LoggerFactory.getLogger(MappedBufferManager.class); + private final Semaphore semaphore; + private final int capacity; + private final AtomicBoolean cleanupInProgress = new AtomicBoolean(false); + private final Striped lock; + + public MappedBufferManager(int capacity) { + this.capacity = capacity; + this.semaphore = new Semaphore(capacity); + this.lock = Striped.lazyWeakLock(1024); + } + + public boolean getQuota(int permits) { + boolean ret = semaphore.tryAcquire(permits); + if (ret) { + if (LOG.isDebugEnabled()) { + LOG.debug("quota is decreased by {} to total {}", permits, semaphore.availablePermits()); + } + } else { + if (cleanupInProgress.compareAndSet(false, true)) { + CompletableFuture.runAsync(() -> { + int p = 0; + try { + for (String key : mappedBuffers.keySet()) { + ByteBuffer buf = mappedBuffers.get(key).get(); + if (buf == null) { + mappedBuffers.remove(key); + p++; + } + } + if (p > 0) { + releaseQuota(p); + } + } finally { + cleanupInProgress.set(false); + } + }); + } + } + return ret; + } + + public void releaseQuota(int permits) { + semaphore.release(permits); + if (LOG.isDebugEnabled()) { + LOG.debug("quota is increased by {} to total {}", permits, semaphore.availablePermits()); + } + } + + public int availableQuota() { + return semaphore.availablePermits(); + } + + public ByteBuffer computeIfAbsent(String file, long position, long size, + Supplier supplier) { + String key = file + "-" + position + "-" + size; + Lock fileLock = lock.get(key); + fileLock.lock(); + try { + WeakReference refer = mappedBuffers.get(key); + if (refer != null && refer.get() != null) { + // reuse the mapped buffer + if (LOG.isDebugEnabled()) { + LOG.debug("find buffer for key {}", key); + } + releaseQuota(1); + return refer.get(); + } + + ByteBuffer buffer = supplier.get(); + if (buffer != null) { + mappedBuffers.put(key, new WeakReference<>(buffer)); + if (LOG.isDebugEnabled()) { + LOG.debug("add buffer for key {}", key); + } + } + return buffer; + } finally { + fileLock.unlock(); + } + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java index 8df856d4b93..601e7b2712c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java @@ -99,7 +99,9 @@ public void setLinked() { linked.set(true); } - /** @return true iff {@link StateMachine.DataChannel} is already linked. */ + /** + * @return true if {@link org.apache.ratis.statemachine.StateMachine.DataChannel} is already linked. + */ public boolean cleanUp() { if (linked.get()) { // already linked, nothing to do. 
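The MappedBufferManager above caps the number of live memory-mapped read buffers with a semaphore (sized by ozone.chunk.read.mapped.buffer.max.count) and caches mappings behind WeakReferences keyed by file, offset and length, so concurrent reads of the same chunk region can share one mapping. A rough, self-contained sketch of how a reader drives it, mirroring the readData() path in ChunkUtils; the temp file, sizes and class name are illustrative only:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager;

// Illustrative driver for the MappedBufferManager introduced above; error handling
// is reduced to the bare minimum.
public class MappedReadSketch {

  public static void main(String[] args) throws IOException {
    MappedBufferManager manager = new MappedBufferManager(1024);   // at most 1024 live mappings
    Path file = Files.createTempFile("chunk", ".data");
    Files.write(file, new byte[4096]);
    long offset = 0;
    long length = 4096;

    if (!manager.getQuota(1)) {
      // Quota exhausted: ChunkUtils.readData() falls back to regular ByteBuffers instead.
      System.out.println("mmap quota exhausted, falling back to a regular read");
      return;
    }
    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
      ByteBuffer mapped = manager.computeIfAbsent(file.toString(), offset, length, () -> {
        try {
          // Only invoked when no live mapping exists yet for (file, offset, length).
          return channel.map(FileChannel.MapMode.READ_ONLY, offset, length);
        } catch (IOException e) {
          return null;
        }
      });
      if (mapped == null) {
        manager.releaseQuota(1);       // mapping failed, hand the permit back
        return;
      }
      System.out.println("mapped " + mapped.remaining() + " bytes, quota left: "
          + manager.availableQuota());
      // Buffers are not returned explicitly: the WeakReference cache lets the GC reclaim
      // them, and a later getQuota() call lazily releases permits of collected entries.
    }
  }
}
```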
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java index 6dd8590bdf3..256d357a31d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java @@ -99,6 +99,9 @@ void finalizeBlock(Container container, BlockID blockId) /** @return the threshold to read using memory mapped buffers. */ int getReadMappedBufferThreshold(); + /** @return the max count of memory mapped buffers to read. */ + int getReadMappedBufferMaxCount(); + /** * Shutdown ContainerManager. */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 26719d7f035..d9edd6d4cb0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -17,27 +17,22 @@ */ package org.apache.hadoop.ozone.container.metadata; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; -import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; -import org.rocksdb.InfoLogLevel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,14 +40,11 @@ import java.io.IOException; import java.util.NoSuchElementException; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; -import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE; - /** * Implementation of the {@link DatanodeStore} interface that contains * functionality common to all more derived datanode store implementations. 
*/ -public abstract class AbstractDatanodeStore implements DatanodeStore { +public class AbstractDatanodeStore extends AbstractRDBStore implements DatanodeStore { private Table metadataTable; @@ -68,12 +60,6 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { public static final Logger LOG = LoggerFactory.getLogger(AbstractDatanodeStore.class); - private volatile DBStore store; - private final AbstractDatanodeDBDefinition dbDef; - private final ManagedColumnFamilyOptions cfOptions; - - private static DatanodeDBProfile dbProfile; - private final boolean openReadOnly; /** * Constructs the metadata store and starts the DB services. @@ -84,114 +70,64 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { protected AbstractDatanodeStore(ConfigurationSource config, AbstractDatanodeDBDefinition dbDef, boolean openReadOnly) throws IOException { - - dbProfile = DatanodeDBProfile - .getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE)); - - // The same config instance is used on each datanode, so we can share the - // corresponding column family options, providing a single shared cache - // for all containers on a datanode. - cfOptions = dbProfile.getColumnFamilyOptions(config); - - this.dbDef = dbDef; - this.openReadOnly = openReadOnly; - start(config); + super(dbDef, config, openReadOnly); } @Override - public void start(ConfigurationSource config) + protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config) throws IOException { - if (this.store == null) { - ManagedDBOptions options = dbProfile.getDBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - if (this.dbDef instanceof DatanodeSchemaOneDBDefinition || - this.dbDef instanceof DatanodeSchemaTwoDBDefinition) { - long maxWalSize = DBProfile.toLong(StorageUnit.MB.toBytes(2)); - options.setMaxTotalWalSize(maxWalSize); - } - - DatanodeConfiguration dc = - config.getObject(DatanodeConfiguration.class); - // Config user log files - InfoLogLevel level = InfoLogLevel.valueOf( - dc.getRocksdbLogLevel() + "_LEVEL"); - options.setInfoLogLevel(level); - options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize()); - options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum()); - - if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) { - options.setDeleteObsoleteFilesPeriodMicros( - dc.getRocksdbDeleteObsoleteFilesPeriod()); - - // For V3, all Rocksdb dir has the same "container.db" name. So use - // parentDirName(storage UUID)-dbDirName as db metrics name - this.store = DBStoreBuilder.newBuilder(config, dbDef) - .setDBOptions(options) - .setDefaultCFOptions(cfOptions) - .setOpenReadOnly(openReadOnly) - .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" + - dbDef.getName()) - .build(); - } else { - this.store = DBStoreBuilder.newBuilder(config, dbDef) - .setDBOptions(options) - .setDefaultCFOptions(cfOptions) - .setOpenReadOnly(openReadOnly) - .build(); - } + AbstractDatanodeDBDefinition dbDefinition = this.getDbDef(); + if (dbDefinition instanceof DatanodeSchemaOneDBDefinition || + dbDefinition instanceof DatanodeSchemaTwoDBDefinition) { + long maxWalSize = DBProfile.toLong(StorageUnit.MB.toBytes(2)); + options.setMaxTotalWalSize(maxWalSize); + } + DatanodeConfiguration dc = + config.getObject(DatanodeConfiguration.class); - // Use the DatanodeTable wrapper to disable the table iterator on - // existing Table implementations retrieved from the DBDefinition. 
- // See the DatanodeTable's Javadoc for an explanation of why this is - // necessary. - metadataTable = new DatanodeTable<>( - dbDef.getMetadataColumnFamily().getTable(this.store)); - checkTableStatus(metadataTable, metadataTable.getName()); - - // The block iterator this class returns will need to use the table - // iterator internally, so construct a block data table instance - // that does not have the iterator disabled by DatanodeTable. - blockDataTableWithIterator = - dbDef.getBlockDataColumnFamily().getTable(this.store); - - blockDataTable = new DatanodeTable<>(blockDataTableWithIterator); - checkTableStatus(blockDataTable, blockDataTable.getName()); - - if (dbDef.getFinalizeBlocksColumnFamily() != null) { - finalizeBlocksTableWithIterator = - dbDef.getFinalizeBlocksColumnFamily().getTable(this.store); - - finalizeBlocksTable = new DatanodeTable<>( - finalizeBlocksTableWithIterator); - checkTableStatus(finalizeBlocksTable, finalizeBlocksTable.getName()); - } + if (dbDefinition instanceof DatanodeSchemaThreeDBDefinition) { + options.setDeleteObsoleteFilesPeriodMicros( + dc.getRocksdbDeleteObsoleteFilesPeriod()); - if (dbDef.getLastChunkInfoColumnFamily() != null) { - lastChunkInfoTable = new DatanodeTable<>( - dbDef.getLastChunkInfoColumnFamily().getTable(this.store)); - checkTableStatus(lastChunkInfoTable, lastChunkInfoTable.getName()); - } + // For V3, all Rocksdb dir has the same "container.db" name. So use + // parentDirName(storage UUID)-dbDirName as db metrics name + dbStoreBuilder.setDBJmxBeanNameName(dbDefinition.getDBLocation(config).getName() + "-" + + dbDefinition.getName()); } - } - - @Override - public synchronized void stop() throws Exception { - if (store != null) { - store.close(); - store = null; + DBStore dbStore = dbStoreBuilder.setDBOptions(options).build(); + + // Use the DatanodeTable wrapper to disable the table iterator on + // existing Table implementations retrieved from the DBDefinition. + // See the DatanodeTable's Javadoc for an explanation of why this is + // necessary. + metadataTable = new DatanodeTable<>( + dbDefinition.getMetadataColumnFamily().getTable(dbStore)); + checkTableStatus(metadataTable, metadataTable.getName()); + + // The block iterator this class returns will need to use the table + // iterator internally, so construct a block data table instance + // that does not have the iterator disabled by DatanodeTable. 
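The comments above capture a deliberate split: the metadata table is wrapped in DatanodeTable so it cannot be iterated, while the block-data table (assigned just below) keeps its raw iterator for the block iterators this store returns. A toy stand-in for that "no iteration" wrapper; DatanodeTable is the real implementation and this sketch is not part of the patch:

```java
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Simplified stand-in for the DatanodeTable idea: a delegating table that refuses
// iteration, so callers must go through the dedicated block iterator built from the
// raw (iterable) table instead.
final class NonIterableTable<K, V> {
  private final Map<K, V> delegate = new ConcurrentHashMap<>();

  void put(K key, V value) {
    delegate.put(key, value);
  }

  V get(K key) {
    return delegate.get(key);
  }

  Iterator<Map.Entry<K, V>> iterator() {
    // Mirrors DatanodeTable: block accidental full scans over potentially huge column families.
    throw new UnsupportedOperationException("Iteration is disabled; use the block iterator");
  }

  public static void main(String[] args) {
    NonIterableTable<String, Long> table = new NonIterableTable<>();
    table.put("block-1", 42L);
    System.out.println(table.get("block-1"));
    try {
      table.iterator();
    } catch (UnsupportedOperationException expected) {
      System.out.println(expected.getMessage());
    }
  }
}
```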
+ blockDataTableWithIterator = + dbDefinition.getBlockDataColumnFamily().getTable(dbStore); + + blockDataTable = new DatanodeTable<>(blockDataTableWithIterator); + checkTableStatus(blockDataTable, blockDataTable.getName()); + + if (dbDefinition.getFinalizeBlocksColumnFamily() != null) { + finalizeBlocksTableWithIterator = + dbDefinition.getFinalizeBlocksColumnFamily().getTable(dbStore); + + finalizeBlocksTable = new DatanodeTable<>( + finalizeBlocksTableWithIterator); + checkTableStatus(finalizeBlocksTable, finalizeBlocksTable.getName()); } - } - @Override - public DBStore getStore() { - return this.store; - } - - @Override - public BatchOperationHandler getBatchHandler() { - return this.store; + if (dbDefinition.getLastChunkInfoColumnFamily() != null) { + lastChunkInfoTable = new DatanodeTable<>( + dbDefinition.getLastChunkInfoColumnFamily().getTable(dbStore)); + checkTableStatus(lastChunkInfoTable, lastChunkInfoTable.getName()); + } + return dbStore; } @Override @@ -240,44 +176,6 @@ public BlockIterator getFinalizeBlockIterator(long containerID, finalizeBlocksTableWithIterator.iterator(), filter); } - @Override - public synchronized boolean isClosed() { - if (this.store == null) { - return true; - } - return this.store.isClosed(); - } - - @Override - public void close() throws IOException { - this.store.close(); - this.cfOptions.close(); - } - - @Override - public void flushDB() throws IOException { - store.flushDB(); - } - - @Override - public void flushLog(boolean sync) throws IOException { - store.flushLog(sync); - } - - @Override - public void compactDB() throws IOException { - store.compactDB(); - } - - @VisibleForTesting - public DatanodeDBProfile getDbProfile() { - return dbProfile; - } - - protected AbstractDatanodeDBDefinition getDbDef() { - return this.dbDef; - } - protected Table getBlockDataTableWithIterator() { return this.blockDataTableWithIterator; } @@ -300,9 +198,9 @@ protected static void checkTableStatus(Table table, String name) /** * Block Iterator for KeyValue Container. This block iterator returns blocks - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no + * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public public static class KeyValueBlockIterator implements @@ -405,9 +303,9 @@ public void close() throws IOException { /** * Block localId Iterator for KeyValue Container. * This Block localId iterator returns localIds - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no + * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. 
If no * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public public static class KeyValueBlockLocalIdIterator implements diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java new file mode 100644 index 00000000000..5ce1a85b388 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java @@ -0,0 +1,135 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; +import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; +import org.rocksdb.InfoLogLevel; + +import java.io.IOException; + +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; +import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE; + +/** + * Abstract Interface defining the way to interact with any rocksDB in the datanode. + * @param Generic parameter defining the schema for the DB. + */ +public abstract class AbstractRDBStore implements DBStoreManager { + private final DEF dbDef; + private final ManagedColumnFamilyOptions cfOptions; + private static DatanodeDBProfile dbProfile; + private final boolean openReadOnly; + private volatile DBStore store; + + protected AbstractRDBStore(DEF dbDef, ConfigurationSource config, boolean openReadOnly) throws IOException { + dbProfile = DatanodeDBProfile.getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE)); + + // The same config instance is used on each datanode, so we can share the + // corresponding column family options, providing a single shared cache + // for all containers on a datanode. 
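AbstractRDBStore (whose constructor continues just below) now owns the common RocksDB lifecycle: profile and option setup, start(), stop(), close(), isClosed() and the flush/compact pass-throughs, while subclasses such as AbstractDatanodeStore and WitnessedContainerMetadataStoreImpl only implement initDBStore() to resolve their tables. A much-simplified, self-contained sketch of that template-method split, using stand-in types rather than the real DBStore/DBStoreBuilder:

```java
import java.io.Closeable;
import java.io.IOException;

// Simplified stand-ins only: the real base class is AbstractRDBStore and the hook
// is initDBStore(DBStoreBuilder, ManagedDBOptions, ConfigurationSource).
abstract class AbstractStoreSketch implements Closeable {

  private volatile AutoCloseable store;

  protected AbstractStoreSketch() throws IOException {
    start();                          // mirrors AbstractRDBStore's constructor calling start()
  }

  public synchronized void start() throws IOException {
    if (store == null) {
      store = initStore();            // subclass hook, like initDBStore()
    }
  }

  public synchronized boolean isClosed() {
    return store == null;
  }

  @Override
  public synchronized void close() throws IOException {
    try {
      if (store != null) {
        store.close();
        store = null;
      }
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  /** Subclasses open the store and look up their column families here. */
  protected abstract AutoCloseable initStore() throws IOException;
}

final class ExampleStore extends AbstractStoreSketch {

  ExampleStore() throws IOException {
  }

  @Override
  protected AutoCloseable initStore() {
    return () -> { };                 // a real store would build RocksDB and resolve tables here
  }

  public static void main(String[] args) throws IOException {
    try (ExampleStore store = new ExampleStore()) {
      System.out.println("closed? " + store.isClosed());   // false while open
    }
  }
}
```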
+ cfOptions = dbProfile.getColumnFamilyOptions(config); + this.dbDef = dbDef; + this.openReadOnly = openReadOnly; + start(config); + } + + public void start(ConfigurationSource config) + throws IOException { + if (this.store == null) { + ManagedDBOptions options = dbProfile.getDBOptions(); + options.setCreateIfMissing(true); + options.setCreateMissingColumnFamilies(true); + + DatanodeConfiguration dc = + config.getObject(DatanodeConfiguration.class); + // Config user log files + InfoLogLevel level = InfoLogLevel.valueOf( + dc.getRocksdbLogLevel() + "_LEVEL"); + options.setInfoLogLevel(level); + options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize()); + options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum()); + this.store = initDBStore(DBStoreBuilder.newBuilder(config, dbDef) + .setDBOptions(options) + .setDefaultCFOptions(cfOptions) + .setOpenReadOnly(openReadOnly), options, config); + } + } + + protected abstract DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, + ConfigurationSource config) throws IOException; + + public synchronized void stop() throws Exception { + if (store != null) { + store.close(); + store = null; + } + } + + public DBStore getStore() { + return this.store; + } + + public synchronized boolean isClosed() { + if (this.store == null) { + return true; + } + return this.store.isClosed(); + } + + public BatchOperationHandler getBatchHandler() { + return this.store; + } + + public void close() throws IOException { + this.store.close(); + this.cfOptions.close(); + } + + public void flushDB() throws IOException { + store.flushDB(); + } + + public void flushLog(boolean sync) throws IOException { + store.flushLog(sync); + } + + public void compactDB() throws IOException { + store.compactDB(); + } + + @VisibleForTesting + public DatanodeDBProfile getDbProfile() { + return dbProfile; + } + + protected DEF getDbDef() { + return this.dbDef; + } + +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java new file mode 100644 index 00000000000..ec9849950a0 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.metadata; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; +import org.apache.hadoop.hdds.utils.db.DBStore; + +import java.io.Closeable; +import java.io.IOException; + +/** + * Interface for interacting with datanode databases. 
+ */ +public interface DBStoreManager extends Closeable { + + /** + * Start datanode manager. + * + * @param configuration - Configuration + * @throws IOException - Unable to start datanode store. + */ + void start(ConfigurationSource configuration) throws IOException; + + /** + * Stop datanode manager. + */ + void stop() throws Exception; + + /** + * Get datanode store. + * + * @return datanode store. + */ + DBStore getStore(); + + /** + * Helper to create and write batch transactions. + */ + BatchOperationHandler getBatchHandler(); + + void flushLog(boolean sync) throws IOException; + + void flushDB() throws IOException; + + void compactDB() throws IOException; + + /** + * Returns if the underlying DB is closed. This call is thread safe. + * @return true if the DB is closed. + */ + boolean isClosed(); + + default void compactionIfNeeded() throws Exception { + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java index 4f54e85da2b..bd1c0fb368a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java @@ -51,27 +51,21 @@ public class DatanodeSchemaOneDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETED_BLOCKS = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - ChunkInfoList.class, SchemaOneChunkInfoListCodec.get()); private static final Map>> diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java index d47446d49b0..10537ca6f2d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java @@ -59,45 +59,35 @@ public class DatanodeSchemaThreeDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( "block_data", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( "metadata", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( "delete_txns", - String.class, FixedLengthStringCodec.get(), - DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition FINALIZE_BLOCKS = new DBColumnFamilyDefinition<>( "finalize_blocks", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); 
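The column-family definitions above (and LAST_CHUNK_INFO just below) no longer pass explicit String.class/Long.class arguments: the definition can now ask the codec for its type, which is why SchemaOneKeyCodec and SchemaOneChunkInfoListCodec gain getTypeClass() overrides later in this diff. A toy codec showing the shape of that contract; SketchCodec is an illustrative stand-in, not the real org.apache.hadoop.hdds.utils.db.Codec interface:

```java
import java.nio.charset.StandardCharsets;

// Toy model of the codec contract: the codec itself exposes the Java type it handles
// via getTypeClass(), so table definitions no longer need a Class argument.
interface SketchCodec<T> {
  Class<T> getTypeClass();
  byte[] toPersistedFormat(T object);
  T fromPersistedFormat(byte[] raw);
}

final class StringSketchCodec implements SketchCodec<String> {
  @Override
  public Class<String> getTypeClass() {
    return String.class;              // what the column-family definition now derives from the codec
  }

  @Override
  public byte[] toPersistedFormat(String object) {
    return object.getBytes(StandardCharsets.UTF_8);
  }

  @Override
  public String fromPersistedFormat(byte[] raw) {
    return new String(raw, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    SketchCodec<String> codec = new StringSketchCodec();
    System.out.println(codec.getTypeClass().getSimpleName());                        // String
    System.out.println(codec.fromPersistedFormat(codec.toPersistedFormat("containerIds")));
  }
}
```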
public static final DBColumnFamilyDefinition LAST_CHUNK_INFO = new DBColumnFamilyDefinition<>( "last_chunk_info", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); private static String separator = ""; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java index b9e7ec7bd5b..bf6b1d0a29c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.metadata; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec; @@ -44,45 +43,35 @@ public class DatanodeSchemaTwoDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( "block_data", - String.class, StringCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( "metadata", - String.class, StringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( "delete_txns", - Long.class, LongCodec.get(), - StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition FINALIZE_BLOCKS = new DBColumnFamilyDefinition<>( "finalize_blocks", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition LAST_CHUNK_INFO = new DBColumnFamilyDefinition<>( "last_chunk_info", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); public DatanodeSchemaTwoDBDefinition(String dbPath, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java index d791d9bbeab..3ebdc3f6295 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java @@ -17,22 +17,16 @@ */ package org.apache.hadoop.ozone.container.metadata; -import com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; -import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import 
org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import java.io.Closeable; import java.io.IOException; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; @@ -40,31 +34,10 @@ /** * Interface for interacting with datanode databases. */ -public interface DatanodeStore extends Closeable { +public interface DatanodeStore extends DBStoreManager { String NO_SUCH_BLOCK_ERR_MSG = "Unable to find the block."; - /** - * Start datanode manager. - * - * @param configuration - Configuration - * @throws IOException - Unable to start datanode store. - */ - void start(ConfigurationSource configuration) throws IOException; - - /** - * Stop datanode manager. - */ - void stop() throws Exception; - - /** - * Get datanode store. - * - * @return datanode store. - */ - @VisibleForTesting - DBStore getStore(); - /** * A Table that keeps the block data. * @@ -100,17 +73,6 @@ public interface DatanodeStore extends Closeable { */ Table getLastChunkInfoTable(); - /** - * Helper to create and write batch transactions. - */ - BatchOperationHandler getBatchHandler(); - - void flushLog(boolean sync) throws IOException; - - void flushDB() throws IOException; - - void compactDB() throws IOException; - BlockIterator getBlockIterator(long containerID) throws IOException; @@ -120,15 +82,6 @@ BlockIterator getBlockIterator(long containerID, BlockIterator getFinalizeBlockIterator(long containerID, KeyPrefixFilter filter) throws IOException; - /** - * Returns if the underlying DB is closed. This call is thread safe. - * @return true if the DB is closed. - */ - boolean isClosed(); - - default void compactionIfNeeded() throws Exception { - } - default BlockData getBlockByID(BlockID blockID, String blockKey) throws IOException { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java index 4beb2075432..25a49eaabe4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java @@ -25,7 +25,8 @@ import java.io.IOException; /** - * Codec for parsing {@link ContainerProtos.ChunkInfoList} objects from data + * Codec for parsing {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfoList} + * objects from data * that may have been written using schema version one. Before upgrading * schema versions, deleted block IDs were stored with a duplicate copy of * their ID as the value in the database. 
After upgrading the code, any @@ -56,6 +57,11 @@ private SchemaOneChunkInfoListCodec() { // singleton } + @Override + public Class getTypeClass() { + return ChunkInfoList.class; + } + @Override public byte[] toPersistedFormat(ChunkInfoList chunkList) { return chunkList.getProtoBufMessage().toByteArray(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java index 2f1660f4d2e..add24874a31 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java @@ -48,6 +48,11 @@ private SchemaOneKeyCodec() { // singleton } + @Override + public Class getTypeClass() { + return String.class; + } + @Override public byte[] toPersistedFormat(String stringObject) throws IOException { try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java new file mode 100644 index 00000000000..a15ab27a69d --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java @@ -0,0 +1,71 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.LongCodec; +import org.apache.hadoop.hdds.utils.db.StringCodec; +import org.apache.hadoop.ozone.OzoneConsts; + +import java.util.Map; + +/** + * Class for defining the schema for master volume in a datanode. 
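The definition that follows keeps a single containerIds column family (keys via LongCodec, values via StringCodec) under the directory resolved from ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, and WitnessedContainerMetadataStoreImpl further down shares one open instance per DB directory. A rough usage sketch against the APIs added in this patch; the configuration setup and the value written are illustrative only:

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl;

// Rough usage sketch of the witnessed-container store added by this patch.
// Assumes the datanode id directory is configured and writable; the entry written
// below is an example only.
public class WitnessedContainerExample {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // One instance is shared per DB directory; repeated get() calls return the same store.
    WitnessedContainerMetadataStore store = WitnessedContainerMetadataStoreImpl.get(conf);
    try {
      Table<Long, String> containerIds = store.getContainerIdsTable();
      containerIds.put(12345L, "OPEN");                  // example entry only
      System.out.println(containerIds.get(12345L));
    } finally {
      store.stop();
    }
  }
}
```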
+ */ +public final class WitnessedContainerDBDefinition extends DBDefinition.WithMap { + + private static final String CONTAINER_IDS_TABLE_NAME = "containerIds"; + + public static final DBColumnFamilyDefinition + CONTAINER_IDS_TABLE = new DBColumnFamilyDefinition<>( + CONTAINER_IDS_TABLE_NAME, + LongCodec.get(), + StringCodec.get()); + + private static final Map> + COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( + CONTAINER_IDS_TABLE); + + private static final WitnessedContainerDBDefinition INSTANCE = new WitnessedContainerDBDefinition(); + + public static WitnessedContainerDBDefinition get() { + return INSTANCE; + } + + private WitnessedContainerDBDefinition() { + super(COLUMN_FAMILIES); + } + + @Override + public String getName() { + return OzoneConsts.WITNESSED_CONTAINER_DB_NAME; + } + + @Override + public String getLocationConfigKey() { + return ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; + } + + public DBColumnFamilyDefinition getContainerIdsTable() { + return CONTAINER_IDS_TABLE; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java new file mode 100644 index 00000000000..b16c7b981ce --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java @@ -0,0 +1,34 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.utils.db.Table; + +/** + * Interface for interacting with database in the master volume of a datanode. + */ +public interface WitnessedContainerMetadataStore extends DBStoreManager { + /** + * A Table that keeps the containerIds in a datanode. + * + * @return Table + */ + Table getContainerIdsTable(); +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java new file mode 100644 index 00000000000..270daf815b2 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java @@ -0,0 +1,78 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Class for interacting with database in the master volume of a datanode. + */ +public final class WitnessedContainerMetadataStoreImpl extends AbstractRDBStore + implements WitnessedContainerMetadataStore { + + private Table containerIdsTable; + private static final ConcurrentMap INSTANCES = + new ConcurrentHashMap<>(); + + public static WitnessedContainerMetadataStore get(ConfigurationSource conf) + throws IOException { + String dbDirPath = DBStoreBuilder.getDBDirPath(WitnessedContainerDBDefinition.get(), conf).getAbsolutePath(); + try { + return INSTANCES.compute(dbDirPath, (k, v) -> { + if (v == null || v.isClosed()) { + try { + return new WitnessedContainerMetadataStoreImpl(conf, false); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return v; + }); + } catch (UncheckedIOException e) { + throw e.getCause(); + } + } + + private WitnessedContainerMetadataStoreImpl(ConfigurationSource config, boolean openReadOnly) throws IOException { + super(WitnessedContainerDBDefinition.get(), config, openReadOnly); + } + + @Override + protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config) + throws IOException { + DBStore dbStore = dbStoreBuilder.build(); + this.containerIdsTable = this.getDbDef().getContainerIdsTable().getTable(dbStore); + return dbStore; + } + + @Override + public Table getContainerIdsTable() { + return containerIdsTable; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java index 1a4f0bf6460..af810c62842 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java @@ -62,6 +62,7 @@ public BackgroundContainerDataScanner(ContainerScannerConfiguration conf, throttler = new HddsDataTransferThrottler(conf.getBandwidthPerVolume()); canceler = new Canceler(); this.metrics = ContainerDataScannerMetrics.create(volume.toString()); + this.metrics.setStorageDirectory(volume.toString()); this.minScanGap = conf.getContainerScanMinGap(); this.checksumManager = checksumManager; } diff --git 
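For reference, the WitnessedContainerMetadataStoreImpl.get(ConfigurationSource) factory above keeps a single store instance per database directory. A condensed sketch of that caching pattern, using a placeholder Store type rather than the real Ozone classes:

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Minimal sketch of the one-instance-per-DB-directory pattern; Store stands in
// for the real metadata store type.
final class StoreCache {

  interface Store {
    boolean isClosed();
  }

  private static final ConcurrentMap<String, Store> INSTANCES = new ConcurrentHashMap<>();

  static Store getOrCreate(String dbDirPath) throws IOException {
    try {
      // compute() is atomic per key, so concurrent callers for the same
      // directory cannot end up opening two stores for it.
      return INSTANCES.compute(dbDirPath, (dir, existing) -> {
        if (existing == null || existing.isClosed()) {
          try {
            return openStore(dir);
          } catch (IOException e) {
            // The remapping lambda cannot throw checked exceptions, so wrap ...
            throw new UncheckedIOException(e);
          }
        }
        return existing;
      });
    } catch (UncheckedIOException e) {
      // ... and unwrap here so callers still see a plain IOException.
      throw e.getCause();
    }
  }

  private static Store openStore(String dir) throws IOException {
    return () -> false; // stand-in for the real DB open
  }
}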
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index a49cb7278a7..94841c9d2ea 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -167,7 +167,6 @@ public void closeContainer(final long containerId) throws IOException { * Returns the Container given a container id. * * @param containerId ID of the container - * @return Container */ public void addFinalizedBlock(final long containerId, final long localId) { @@ -250,6 +249,16 @@ public Iterator> getContainers(HddsVolume volume) { return containerSet.getContainerIterator(volume); } + /** + * Get the number of containers based on the given volume. + * + * @param volume hdds volume. + * @return number of containers. + */ + public long getContainerCount(HddsVolume volume) { + return containerSet.containerCount(volume); + } + void updateDataScanTimestamp(long containerId, Instant timestamp) throws IOException { Container container = containerSet.getContainer(containerId); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java index a3f71d34ba1..76e71312aed 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java @@ -37,6 +37,8 @@ public final class ContainerDataScannerMetrics @Metric("disk bandwidth used by the container data scanner per volume") private MutableRate numBytesScanned; + private String storageDirectory; + public double getNumBytesScannedMean() { return numBytesScanned.lastStat().mean(); } @@ -66,4 +68,13 @@ public static ContainerDataScannerMetrics create(final String volumeName) { return ms.register(name, null, new ContainerDataScannerMetrics(name, ms)); } + + @Metric("Returns the Directory name for the volume") + public String getStorageDirectory() { + return storageDirectory; + } + + public void setStorageDirectory(final String volumeName) { + this.storageDirectory = volumeName; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 1685d1c5fe2..027fbff89c8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -320,8 +320,7 @@ private void resolveDuplicate(KeyValueContainer existing, private void swapAndRemoveContainer(KeyValueContainer existing, KeyValueContainer toAdd) throws IOException { - containerSet.removeContainer( - existing.getContainerData().getContainerID()); + containerSet.removeContainerOnlyFromMemory(existing.getContainerData().getContainerID()); containerSet.addContainer(toAdd); KeyValueContainerUtil.removeContainer(existing.getContainerData(), hddsVolume.getConf()); diff 
--git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java index eb0f3eedb03..df5050266bd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java @@ -80,6 +80,9 @@ public static synchronized void init( } private static boolean shouldScan(Container container) { + if (container == null) { + return false; + } long containerID = container.getContainerData().getContainerID(); if (instance == null) { LOG.debug("Skipping on demand scan for container {} since scanner was " + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index cb7db07c24f..c6eaeebfd4a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -29,12 +29,16 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.BlockDeletingService; @@ -58,6 +62,8 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume.VolumeType; import org.apache.hadoop.ozone.container.common.volume.StorageVolumeChecker; import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.StaleRecoveringContainerScrubbingService; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ReplicationServer; import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; @@ -71,6 +77,7 @@ import java.io.IOException; import java.time.Duration; import java.util.ArrayList; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -131,6 +138,7 @@ public class OzoneContainer { private ScheduledExecutorService 
dbCompactionExecutorService; private final ContainerMetrics metrics; + private WitnessedContainerMetadataStore witnessedContainerMetadataStore; enum InitializingStatus { UNINITIALIZED, INITIALIZING, INITIALIZED @@ -181,12 +189,11 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, TimeUnit.MINUTES); } } - long recoveringContainerTimeout = config.getTimeDuration( OZONE_RECOVERING_CONTAINER_TIMEOUT, OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - - containerSet = new ContainerSet(recoveringContainerTimeout); + this.witnessedContainerMetadataStore = WitnessedContainerMetadataStoreImpl.get(conf); + containerSet = new ContainerSet(witnessedContainerMetadataStore.getContainerIdsTable(), recoveringContainerTimeout); metadataScanner = null; metrics = ContainerMetrics.create(conf); @@ -309,7 +316,7 @@ public GrpcTlsConfig getTlsClientConfig() { * Build's container map after volume format. */ @VisibleForTesting - public void buildContainerSet() { + public void buildContainerSet() throws IOException { Iterator volumeSetIterator = volumeSet.getVolumesList() .iterator(); ArrayList volumeThreads = new ArrayList<>(); @@ -337,6 +344,14 @@ public void buildContainerSet() { for (int i = 0; i < volumeThreads.size(); i++) { volumeThreads.get(i).join(); } + try (TableIterator> itr = + containerSet.getContainerIdsTable().iterator()) { + Map containerIds = new HashMap<>(); + while (itr.hasNext()) { + containerIds.put(itr.next().getKey(), 0L); + } + containerSet.buildMissingContainerSetAndValidate(containerIds); + } } catch (InterruptedException ex) { LOG.error("Volume Threads Interrupted exception", ex); Thread.currentThread().interrupt(); @@ -392,6 +407,18 @@ private void initContainerScanner(ContainerScannerConfiguration c) { } } + /** + * We need to inject the containerController into the hddsVolume. + * because we need to obtain the container count + * for each disk based on the container controller. + */ + private void initHddsVolumeContainer() { + for (StorageVolume v : volumeSet.getVolumesList()) { + HddsVolume hddsVolume = (HddsVolume) v; + hddsVolume.setController(controller); + } + } + private void initMetadataScanner(ContainerScannerConfiguration c) { if (this.metadataScanner == null) { this.metadataScanner = @@ -483,13 +510,15 @@ public void start(String clusterId) throws IOException { replicationServer.start(); datanodeDetails.setPort(Name.REPLICATION, replicationServer.getPort()); - writeChannel.start(); - readChannel.start(); hddsDispatcher.init(); hddsDispatcher.setClusterId(clusterId); + writeChannel.start(); + readChannel.start(); blockDeletingService.start(); recoveringContainerScrubbingService.start(); + initHddsVolumeContainer(); + // mark OzoneContainer as INITIALIZED. initializingStatus.set(InitializingStatus.INITIALIZED); } @@ -517,11 +546,21 @@ public void stop() { } blockDeletingService.shutdown(); recoveringContainerScrubbingService.shutdown(); + IOUtils.closeQuietly(metrics); ContainerMetrics.remove(); checksumTreeManager.stop(); + if (this.witnessedContainerMetadataStore != null) { + try { + this.witnessedContainerMetadataStore.stop(); + } catch (Exception e) { + LOG.error("Error while stopping witnessedContainerMetadataStore. 
Status of store: {}", + witnessedContainerMetadataStore.isClosed(), e); + } + this.witnessedContainerMetadataStore = null; + } } - public void handleVolumeFailures() { + public void handleVolumeFailures() throws StorageContainerException { if (containerSet != null) { containerSet.handleVolumeFailures(context); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java index 72fa88b35d9..f4bf54a3d82 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java @@ -70,6 +70,10 @@ protected AbstractReplicationTask(long containerID, this.term = term; queued = Instant.now(clock); } + + protected abstract String getMetricName(); + + protected abstract String getMetricDescriptionSegment(); public long getContainerId() { return containerId; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 9e5b5dbdabd..db86882bfb8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -128,7 +128,7 @@ public void importContainer(long containerID, Path tarFilePath, try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) { Container container = controller.importContainer( containerData, input, packer); - containerSet.addContainer(container); + containerSet.addContainerByOverwriteMissingContainer(container); } } finally { importContainerProgress.remove(containerID); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java index 6bc237207b3..26cd0d82a99 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.container.replication; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.util.HashSet; import java.util.Set; @@ -59,37 +58,24 @@ public class GrpcReplicationService extends private final ContainerReplicationSource source; private final ContainerImporter importer; - private final boolean zeroCopyEnabled; - private final ZeroCopyMessageMarshaller sendContainerZeroCopyMessageMarshaller; private final ZeroCopyMessageMarshaller copyContainerZeroCopyMessageMarshaller; - public GrpcReplicationService(ContainerReplicationSource source, - ContainerImporter importer, boolean zeroCopyEnabled) { + public GrpcReplicationService(ContainerReplicationSource source, ContainerImporter importer) { this.source = source; this.importer = importer; - this.zeroCopyEnabled = zeroCopyEnabled; - - if (zeroCopyEnabled) { - sendContainerZeroCopyMessageMarshaller = new 
ZeroCopyMessageMarshaller<>( - SendContainerRequest.getDefaultInstance()); - copyContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( - CopyContainerRequestProto.getDefaultInstance()); - } else { - sendContainerZeroCopyMessageMarshaller = null; - copyContainerZeroCopyMessageMarshaller = null; - } + + sendContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + SendContainerRequest.getDefaultInstance()); + copyContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + CopyContainerRequestProto.getDefaultInstance()); } public ServerServiceDefinition bindServiceWithZeroCopy() { ServerServiceDefinition orig = super.bindService(); - if (!zeroCopyEnabled) { - LOG.info("Zerocopy is not enabled."); - return orig; - } Set methodNames = new HashSet<>(); ServerServiceDefinition.Builder builder = @@ -155,14 +141,7 @@ public void download(CopyContainerRequestProto request, } finally { // output may have already been closed, ignore such errors IOUtils.cleanupWithLogger(LOG, outputStream); - - if (copyContainerZeroCopyMessageMarshaller != null) { - InputStream popStream = - copyContainerZeroCopyMessageMarshaller.popStream(request); - if (popStream != null) { - IOUtils.cleanupWithLogger(LOG, popStream); - } - } + copyContainerZeroCopyMessageMarshaller.release(request); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java index b4e92a4a60a..6ca474bdd8a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java @@ -99,13 +99,12 @@ public ReplicationServer(ContainerController controller, new LinkedBlockingQueue<>(replicationQueueLimit), threadFactory); - init(replicationConfig.isZeroCopyEnable()); + init(); } - public void init(boolean enableZeroCopy) { + public void init() { GrpcReplicationService grpcReplicationService = new GrpcReplicationService( - new OnDemandContainerReplicationSource(controller), importer, - enableZeroCopy); + new OnDemandContainerReplicationSource(controller), importer); NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port) .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) .addService(ServerInterceptors.intercept( @@ -203,11 +202,6 @@ public static final class ReplicationConfig { static final String REPLICATION_OUTOFSERVICE_FACTOR_KEY = PREFIX + "." + OUTOFSERVICE_FACTOR_KEY; - public static final String ZEROCOPY_ENABLE_KEY = "zerocopy.enabled"; - private static final boolean ZEROCOPY_ENABLE_DEFAULT = true; - private static final String ZEROCOPY_ENABLE_DEFAULT_VALUE = - "true"; - /** * The maximum number of replication commands a single datanode can execute * simultaneously. @@ -249,15 +243,6 @@ public static final class ReplicationConfig { ) private double outOfServiceFactor = OUTOFSERVICE_FACTOR_DEFAULT; - @Config(key = ZEROCOPY_ENABLE_KEY, - type = ConfigType.BOOLEAN, - defaultValue = ZEROCOPY_ENABLE_DEFAULT_VALUE, - tags = {DATANODE, SCM}, - description = "Specify if zero-copy should be enabled for " + - "replication protocol." 
- ) - private boolean zeroCopyEnable = ZEROCOPY_ENABLE_DEFAULT; - public double getOutOfServiceFactor() { return outOfServiceFactor; } @@ -291,14 +276,6 @@ public void setReplicationQueueLimit(int limit) { this.replicationQueueLimit = limit; } - public boolean isZeroCopyEnable() { - return zeroCopyEnable; - } - - public void setZeroCopyEnable(boolean zeroCopyEnable) { - this.zeroCopyEnable = zeroCopyEnable; - } - @PostConstruct public void validate() { if (replicationMaxStreams < 1) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index 5ceea125e81..9513cac84ef 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -26,6 +26,7 @@ import java.util.Objects; import java.util.OptionalLong; import java.util.Set; +import java.util.Collections; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.PriorityBlockingQueue; @@ -42,6 +43,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; @@ -49,6 +52,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,11 +75,21 @@ public final class ReplicationSupervisor { private final StateContext context; private final Clock clock; - private final AtomicLong requestCounter = new AtomicLong(); - private final AtomicLong successCounter = new AtomicLong(); - private final AtomicLong failureCounter = new AtomicLong(); - private final AtomicLong timeoutCounter = new AtomicLong(); - private final AtomicLong skippedCounter = new AtomicLong(); + private final Map requestCounter = new ConcurrentHashMap<>(); + private final Map successCounter = new ConcurrentHashMap<>(); + private final Map failureCounter = new ConcurrentHashMap<>(); + private final Map timeoutCounter = new ConcurrentHashMap<>(); + private final Map skippedCounter = new ConcurrentHashMap<>(); + private final Map queuedCounter = new ConcurrentHashMap<>(); + + private final MetricsRegistry registry; + private final Map opsLatencyMs = new ConcurrentHashMap<>(); + + private static final Map METRICS_MAP; + + static { + METRICS_MAP = new HashMap<>(); + } /** * A set of container IDs that are currently being downloaded @@ -188,6 +202,10 @@ public static Builder newBuilder() { return new Builder(); } + public static Map getMetricsMap() { + return Collections.unmodifiableMap(METRICS_MAP); + } + private ReplicationSupervisor(StateContext context, ExecutorService executor, ReplicationConfig replicationConfig, DatanodeConfiguration datanodeConfig, Clock clock, 
IntConsumer executorThreadUpdater) { @@ -207,6 +225,7 @@ private ReplicationSupervisor(StateContext context, ExecutorService executor, nodeStateUpdated(dn.getPersistedOpState()); } } + registry = new MetricsRegistry(ReplicationSupervisor.class.getSimpleName()); } /** @@ -221,6 +240,22 @@ public void addTask(AbstractReplicationTask task) { return; } + if (requestCounter.get(task.getMetricName()) == null) { + synchronized (this) { + if (requestCounter.get(task.getMetricName()) == null) { + requestCounter.put(task.getMetricName(), new AtomicLong(0)); + successCounter.put(task.getMetricName(), new AtomicLong(0)); + failureCounter.put(task.getMetricName(), new AtomicLong(0)); + timeoutCounter.put(task.getMetricName(), new AtomicLong(0)); + skippedCounter.put(task.getMetricName(), new AtomicLong(0)); + queuedCounter.put(task.getMetricName(), new AtomicLong(0)); + opsLatencyMs.put(task.getMetricName(), registry.newRate( + task.getClass().getSimpleName() + "Ms")); + METRICS_MAP.put(task.getMetricName(), task.getMetricDescriptionSegment()); + } + } + } + if (inFlight.add(task)) { if (task.getPriority() != ReplicationCommandPriority.LOW) { // Low priority tasks are not included in the replication queue sizes @@ -229,6 +264,7 @@ public void addTask(AbstractReplicationTask task) { taskCounter.computeIfAbsent(task.getClass(), k -> new AtomicInteger()).incrementAndGet(); } + queuedCounter.get(task.getMetricName()).incrementAndGet(); executor.execute(new TaskRunner(task)); } } @@ -329,15 +365,16 @@ public TaskRunner(AbstractReplicationTask task) { @Override public void run() { + final long startTime = Time.monotonicNow(); try { - requestCounter.incrementAndGet(); + requestCounter.get(task.getMetricName()).incrementAndGet(); final long now = clock.millis(); final long deadline = task.getDeadline(); if (deadline > 0 && now > deadline) { LOG.info("Ignoring {} since the deadline has passed ({} < {})", this, Instant.ofEpochMilli(deadline), Instant.ofEpochMilli(now)); - timeoutCounter.incrementAndGet(); + timeoutCounter.get(task.getMetricName()).incrementAndGet(); return; } @@ -364,19 +401,21 @@ public void run() { task.runTask(); if (task.getStatus() == Status.FAILED) { LOG.warn("Failed {}", this); - failureCounter.incrementAndGet(); + failureCounter.get(task.getMetricName()).incrementAndGet(); } else if (task.getStatus() == Status.DONE) { LOG.info("Successful {}", this); - successCounter.incrementAndGet(); + successCounter.get(task.getMetricName()).incrementAndGet(); } else if (task.getStatus() == Status.SKIPPED) { LOG.info("Skipped {}", this); - skippedCounter.incrementAndGet(); + skippedCounter.get(task.getMetricName()).incrementAndGet(); } } catch (Exception e) { task.setStatus(Status.FAILED); LOG.warn("Failed {}", this, e); - failureCounter.incrementAndGet(); + failureCounter.get(task.getMetricName()).incrementAndGet(); } finally { + queuedCounter.get(task.getMetricName()).decrementAndGet(); + opsLatencyMs.get(task.getMetricName()).add(Time.monotonicNow() - startTime); inFlight.remove(task); decrementTaskCounter(task); } @@ -419,7 +458,12 @@ public boolean equals(Object o) { } public long getReplicationRequestCount() { - return requestCounter.get(); + return getCount(requestCounter); + } + + public long getReplicationRequestCount(String metricsName) { + AtomicLong counter = requestCounter.get(metricsName); + return counter != null ? 
counter.get() : 0; } public long getQueueSize() { @@ -438,20 +482,66 @@ public long getMaxReplicationStreams() { } } + private long getCount(Map counter) { + long total = 0; + for (Map.Entry entry : counter.entrySet()) { + total += entry.getValue().get(); + } + return total; + } + public long getReplicationSuccessCount() { - return successCounter.get(); + return getCount(successCounter); + } + + public long getReplicationSuccessCount(String metricsName) { + AtomicLong counter = successCounter.get(metricsName); + return counter != null ? counter.get() : 0; } public long getReplicationFailureCount() { - return failureCounter.get(); + return getCount(failureCounter); + } + + public long getReplicationFailureCount(String metricsName) { + AtomicLong counter = failureCounter.get(metricsName); + return counter != null ? counter.get() : 0; } public long getReplicationTimeoutCount() { - return timeoutCounter.get(); + return getCount(timeoutCounter); + } + + public long getReplicationTimeoutCount(String metricsName) { + AtomicLong counter = timeoutCounter.get(metricsName); + return counter != null ? counter.get() : 0; } public long getReplicationSkippedCount() { - return skippedCounter.get(); + return getCount(skippedCounter); + } + + public long getReplicationSkippedCount(String metricsName) { + AtomicLong counter = skippedCounter.get(metricsName); + return counter != null ? counter.get() : 0; } + public long getReplicationQueuedCount() { + return getCount(queuedCounter); + } + + public long getReplicationQueuedCount(String metricsName) { + AtomicLong counter = queuedCounter.get(metricsName); + return counter != null ? counter.get() : 0; + } + + public long getReplicationRequestAvgTime(String metricsName) { + MutableRate rate = opsLatencyMs.get(metricsName); + return rate != null ? (long) rate.lastStat().mean() : 0; + } + + public long getReplicationRequestTotalTime(String metricsName) { + MutableRate rate = opsLatencyMs.get(metricsName); + return rate != null ? 
(long) rate.lastStat().total() : 0; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java index 671e985d7ad..cd1103a0c46 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java @@ -67,20 +67,54 @@ public void getMetrics(MetricsCollector collector, boolean all) { supervisor.getTotalInFlightReplications()) .addGauge(Interns.info("numQueuedReplications", "Number of replications in queue"), - supervisor.getQueueSize()) + supervisor.getReplicationQueuedCount()) .addGauge(Interns.info("numRequestedReplications", "Number of requested replications"), supervisor.getReplicationRequestCount()) + .addGauge(Interns.info("numSuccessReplications", + "Number of successful replications"), + supervisor.getReplicationSuccessCount()) + .addGauge(Interns.info("numFailureReplications", + "Number of failure replications"), + supervisor.getReplicationFailureCount()) .addGauge(Interns.info("numTimeoutReplications", "Number of replication requests timed out before being processed"), supervisor.getReplicationTimeoutCount()) .addGauge(Interns.info("numSkippedReplications", "Number of replication requests skipped as the container is " - + "already present"), supervisor.getReplicationSkippedCount()) + + "already present"), + supervisor.getReplicationSkippedCount()) .addGauge(Interns.info("maxReplicationStreams", "Maximum number of " + "concurrent replication tasks which can run simultaneously"), supervisor.getMaxReplicationStreams()); + Map metricsMap = ReplicationSupervisor.getMetricsMap(); + if (!metricsMap.isEmpty()) { + metricsMap.forEach((metricsName, descriptionSegment) -> { + if (!metricsName.equals("")) { + builder.addGauge(Interns.info("numRequested" + metricsName, + "Number of requested " + descriptionSegment), + supervisor.getReplicationRequestCount(metricsName)) + .addGauge(Interns.info("numSuccess" + metricsName, + "Number of successful " + descriptionSegment), + supervisor.getReplicationSuccessCount(metricsName)) + .addGauge(Interns.info("numFailure" + metricsName, + "Number of failure " + descriptionSegment), + supervisor.getReplicationFailureCount(metricsName)) + .addGauge(Interns.info("numTimeout" + metricsName, + "Number of " + descriptionSegment + " timed out before being processed"), + supervisor.getReplicationTimeoutCount(metricsName)) + .addGauge(Interns.info("numSkipped" + metricsName, + "Number of " + descriptionSegment + " skipped as the container is " + + "already present"), + supervisor.getReplicationSkippedCount(metricsName)) + .addGauge(Interns.info("numQueued" + metricsName, + "Number of " + descriptionSegment + " in queue"), + supervisor.getReplicationQueuedCount(metricsName)); + } + }); + } + Map tasks = supervisor.getInFlightReplicationSummary(); for (Map.Entry entry : tasks.entrySet()) { builder.addGauge(Interns.info("numInflight" + entry.getKey(), diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java index ca0ca98906c..2168f324c24 100644 --- 
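The ReplicationSupervisor changes above replace the single request/success/failure counters with maps keyed by each task's metric name, so EC reconstruction tasks and container replication tasks are counted separately while aggregate totals remain available. A reduced sketch of that bookkeeping, with illustrative names rather than the Ozone API:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Reduced sketch of per-task-type counters keyed by metric name.
final class PerTypeCounters {

  private final Map<String, AtomicLong> requested = new ConcurrentHashMap<>();
  private final Map<String, AtomicLong> succeeded = new ConcurrentHashMap<>();

  void taskQueued(String metricName) {
    // computeIfAbsent registers the counter lazily and thread-safely,
    // equivalent to the synchronized double-check used in the patch.
    requested.computeIfAbsent(metricName, k -> new AtomicLong()).incrementAndGet();
  }

  void taskSucceeded(String metricName) {
    succeeded.computeIfAbsent(metricName, k -> new AtomicLong()).incrementAndGet();
  }

  // Per-type view, e.g. the value behind a "numRequested<MetricName>" gauge.
  long requestedCount(String metricName) {
    AtomicLong counter = requested.get(metricName);
    return counter != null ? counter.get() : 0;
  }

  // Aggregate view, preserving the old single-counter semantics.
  long totalRequested() {
    return requested.values().stream().mapToLong(AtomicLong::get).sum();
  }
}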
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java @@ -65,6 +65,16 @@ protected ReplicationTask( replicator); } + @Override + public String getMetricName() { + return "ContainerReplications"; + } + + @Override + public String getMetricDescriptionSegment() { + return "container replications"; + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java index 506a96fe051..40b4dec3493 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java @@ -30,7 +30,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -105,10 +104,7 @@ public void onNext(SendContainerRequest req) { onError(t); } finally { if (marshaller != null) { - InputStream popStream = marshaller.popStream(req); - if (popStream != null) { - IOUtils.cleanupWithLogger(LOG, popStream); - } + marshaller.release(req); } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java index 5fdfc931b99..e49f3c3d6e5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java @@ -27,9 +27,9 @@ public interface StreamingSource { /** * - * @param id: custom identifier + * @param id custom identifier * - * @return map of files which should be copied (logical name -> real path) + * @return map of files which should be copied (logical name -> real path) */ Map getFilesToStream(String id) throws InterruptedException; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java index f6633cb9d37..ada80c980f6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.ozone.protocol.commands; +import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -140,7 +140,7 @@ public String toString() { .collect(Collectors.joining(", 
"))).append("]") .append(", targets: ").append(getTargetDatanodes()) .append(", missingIndexes: ").append( - StringUtils.bytes2String(missingContainerIndexes.asReadOnlyByteBuffer())); + Arrays.toString(missingContainerIndexes.toByteArray())); return sb.toString(); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index eeb99b5a3db..d6b44f2a641 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -122,7 +122,6 @@ private SCMDatanodeResponse submitRequest(Type type, /** * Returns SCM version. * - * @param unused - set to null and unused. * @return Version info. */ @Override diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html index fd3d7407d23..4f51b423e8a 100644 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html @@ -22,8 +22,32 @@ +

    HeartBeat Information

    + + + + + + + + + + + + + + + + + + + + + +
+        <th>Address</th>
+        <th>Last Successful HeartBeat</th>
+        <th>Missed Count</th>
+        <th>State</th>
+        <th>Type</th>
+        <th>Version Number</th>
+        <td>{{scm.addressString}}</td>
+        <td>{{scm.lastSuccessfulHeartbeat}}</td>
+        <td>{{scm.missedCount}}</td>
+        <td>{{scm.state}}</td>
+        <td>{{scm.type}}</td>
+        <td>{{scm.versionNumber}}</td>
    +
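The Last Successful HeartBeat cell above is backed by an epoch-seconds value; the page script added later in this patch (dn.js) converts it to a local yyyy-MM-dd HH:mm:ss string before display. The same conversion in Java, shown only as a reference sketch:

import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

// Reference sketch: format an epoch-seconds heartbeat timestamp the way the
// datanode web UI renders it (local time, yyyy-MM-dd HH:mm:ss).
final class HeartbeatTime {

  private static final DateTimeFormatter FORMAT =
      DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss").withZone(ZoneId.systemDefault());

  static String format(long epochSeconds) {
    return FORMAT.format(Instant.ofEpochSecond(epochSeconds));
  }
}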

    Volume Information

    - +
    @@ -33,6 +57,7 @@

    Volume Information

    + @@ -45,6 +70,7 @@

    Volume Information

    + diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html new file mode 100644 index 00000000000..5c54a2aa0a7 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html @@ -0,0 +1,47 @@ + + + + + DataNode Scanner Status + + +

    DataNode Scanner Information

    +
    Directory Available Space Reserved Total CapacityContainers State
    {{volumeInfo.Available}} {{volumeInfo.Reserved}} {{volumeInfo.TotalCapacity}}{{volumeInfo.Containers}} {{volumeInfo["tag.VolumeState"]}}
    + + + + + + + + + + + + + + + + + + + + +
+        <th>Directory</th>
+        <th>NumBytesScannedNumOps</th>
+        <th>NumBytesScannedAvgTime</th>
+        <th>NumContainersScanned</th>
+        <th>NumScanIterations</th>
+        <th>NumUnHealthyContainers</th>
+        <td>{{scanner["tag.StorageDirectory"]}}</td>
+        <td>{{scanner.NumBytesScannedNumOps}}</td>
+        <td>{{scanner.NumBytesScannedAvgTime | millisecondsToMinutes}}</td>
+        <td>{{scanner.NumContainersScanned}}</td>
+        <td>{{scanner.NumScanIterations}}</td>
+        <td>{{scanner.NumUnHealthyContainers}}</td>
    + + \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js index adc507acce9..547e566ef8a 100644 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js @@ -36,20 +36,104 @@ volume.TotalCapacity = transform(volume.TotalCapacity); }) }); + + $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=SCMConnectionManager") + .then(function (result) { + ctrl.heartbeatmetrics = result.data.beans; + ctrl.heartbeatmetrics.forEach(scm => { + var scmServers = scm.SCMServers; + scmServers.forEach(scmServer => { + scmServer.lastSuccessfulHeartbeat = convertTimestampToDate(scmServer.lastSuccessfulHeartbeat) + }) + }) + }); } }); - function transform(v) { - var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB']; - var prev = 0, i = 0; - while (Math.floor(v) > 0 && i < UNITS.length) { + + // Register ioStatus Controller + angular.module('ozone').config(function ($routeProvider) { + $routeProvider.when('/iostatus', { + templateUrl: 'iostatus.html', + controller: 'IOStatusController as ioStatusCtrl', + }); + }); + + angular.module('ozone') + .controller('IOStatusController', function ($http) { + var ctrl = this; + $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=VolumeIOStats*") + .then(function (result) { + ctrl.dniostatus = result.data.beans; + }); + }); + + // Register Scanner Controller + angular.module('ozone').config(function ($routeProvider) { + $routeProvider.when('/dn-scanner', { + templateUrl: 'dn-scanner.html', + controller: 'DNScannerController as scannerStatusCtrl', + }); + }); + + angular.module('ozone') + .controller('DNScannerController', function ($http) { + var ctrl = this; + $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=ContainerDataScannerMetrics*") + .then(function (result) { + ctrl.dnscanner = result.data.beans; + }); + }); + + angular.module('ozone') + .filter('millisecondsToMinutes', function() { + return function(milliseconds) { + if (isNaN(milliseconds)) { + return 'Invalid input'; + } + var minutes = Math.floor(milliseconds / 60000); // 1 minute = 60000 milliseconds + var seconds = Math.floor((milliseconds % 60000) / 1000); + return minutes + ' mins ' + seconds + ' secs'; + }; + }); + + angular.module('ozone') + .filter('twoDecimalPlaces', function() { + return function(input) { + if (isNaN(input)) { + return 'Invalid input'; + } + return parseFloat(input).toFixed(2); + }; + }); + + function transform(v) { + var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB']; + var prev = 0, i = 0; + while (Math.floor(v) > 0 && i < UNITS.length) { prev = v; v /= 1024; i += 1; - } - if (i > 0 && i < UNITS.length) { + } + if (i > 0 && i < UNITS.length) { v = prev; i -= 1; - } - return Math.round(v * 100) / 100 + ' ' + UNITS[i]; } + return Math.round(v * 100) / 100 + ' ' + UNITS[i]; + } + + function convertTimestampToDate(timestamp) { + if (!timestamp) return ''; + var milliseconds = timestamp * 1000; + + var date = new Date(milliseconds); + + var year = date.getFullYear(); + var month = date.getMonth() + 1; + var day = date.getDate(); + var hours = date.getHours(); + var minutes = date.getMinutes(); + var seconds = date.getSeconds(); + + return `${year}-${month.toString().padStart(2, '0')}-${day.toString().padStart(2, '0')} ${hours.toString().padStart(2, '0')}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')}`; + } 
})(); diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html index 1c32fe64e0e..0e1cbf21a00 100644 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html @@ -49,11 +49,10 @@ HDDS Datanode Service - - - - + diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html new file mode 100644 index 00000000000..94916821bd8 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html @@ -0,0 +1,76 @@ + + + + + DataNode IO Status + + + +

    Read Performance

    + + + + + + + + + + + + + + + + + + + + + + + +
+        <th>Directory</th>
+        <th>ReadBytes</th>
+        <th>ReadOpCount</th>
+        <th>ReadAvgTime</th>
+        <th>ReadLatency60s(P90)</th>
+        <th>ReadLatency60s(P95)</th>
+        <th>ReadLatency60s(P99)</th>
+        <td>{{volumeInfo["tag.StorageDirectory"]}}</td>
+        <td>{{volumeInfo.ReadBytes}}</td>
+        <td>{{volumeInfo.ReadOpCount}}</td>
+        <td>{{volumeInfo.ReadTimeAvgTime | twoDecimalPlaces}} ms</td>
+        <td>{{volumeInfo.ReadLatency60s90thPercentileLatency | twoDecimalPlaces}} ms</td>
+        <td>{{volumeInfo.ReadLatency60s95thPercentileLatency | twoDecimalPlaces}} ms</td>
+        <td>{{volumeInfo.ReadLatency60s99thPercentileLatency | twoDecimalPlaces}} ms</td>
    + +

    Write Performance

    + + + + + + + + + + + + + + + + + + + + + + + +
+        <th>Directory</th>
+        <th>WriteBytes</th>
+        <th>WriteOpCount</th>
+        <th>WriteAvgTime</th>
+        <th>WriteLatency60s(P90)</th>
+        <th>WriteLatency60s(P95)</th>
+        <th>WriteLatency60s(P99)</th>
+        <td>{{volumeInfo["tag.StorageDirectory"]}}</td>
+        <td>{{volumeInfo.WriteBytes}}</td>
+        <td>{{volumeInfo.WriteOpCount}}</td>
+        <td>{{volumeInfo.WriteTimeAvgTime | twoDecimalPlaces}} ms</td>
+        <td>{{volumeInfo.WriteLatency60s90thPercentileLatency | twoDecimalPlaces}} ms</td>
+        <td>{{volumeInfo.WriteLatency60s95thPercentileLatency | twoDecimalPlaces}} ms</td>
+        <td>{{volumeInfo.WriteLatency60s99thPercentileLatency | twoDecimalPlaces}} ms</td>
    + + \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 3b9c4a93ec5..e52328bafd0 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -163,11 +163,11 @@ public static DatanodeDetails createDatanodeDetails() { .nextInt(256) + "." + random.nextInt(256); DatanodeDetails.Port containerPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 0); + DatanodeDetails.newStandalonePort(0); DatanodeDetails.Port ratisPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, 0); + DatanodeDetails.newRatisPort(0); DatanodeDetails.Port restPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") @@ -414,7 +414,7 @@ public static ContainerController getEmptyContainerController() { public static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); + dn.getRatisPort().getValue()); return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, getNoopContainerDispatcher(), getEmptyContainerController(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index e1e1ee9172a..41be7acbb14 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -198,8 +198,7 @@ public void testDatanodeStateContext() throws IOException, OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT); idPath.delete(); DatanodeDetails datanodeDetails = getNewDatanodeDetails(); - DatanodeDetails.Port port = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, + DatanodeDetails.Port port = DatanodeDetails.newStandalonePort( OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); @@ -325,8 +324,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT); idPath.delete(); DatanodeDetails datanodeDetails = getNewDatanodeDetails(); - DatanodeDetails.Port port = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, + DatanodeDetails.Port port = DatanodeDetails.newStandalonePort( OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); @@ -408,12 +406,9 @@ public void testDatanodeStateMachineWithInvalidConfiguration() } private DatanodeDetails getNewDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = 
DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Port streamPort = DatanodeDetails.newPort( DatanodeDetails.Port.Name.RATIS_DATASTREAM, 0); return DatanodeDetails.newBuilder() diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java index 4f33e833a3c..f825be46882 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java @@ -38,7 +38,6 @@ import java.nio.charset.StandardCharsets; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.ReadChunk; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadChunkResponse; @@ -92,7 +91,7 @@ public void testTarName() throws IOException { public void testDatanodeIDPersistent(@TempDir File tempDir) throws Exception { // Generate IDs for testing DatanodeDetails id1 = randomDatanodeDetails(); - id1.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 1)); + id1.setPort(DatanodeDetails.newStandalonePort(1)); assertWriteRead(tempDir, id1); // Add certificate serial id. diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java index 8a272868146..1a0401de7e8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java @@ -73,7 +73,7 @@ void testWriteReadBeforeRatisDatastreamPortLayoutVersion(@TempDir File dir) // if no separate admin/server/datastream port, return single Ratis one for // compat assertEquals(read.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM), - read.getPort(DatanodeDetails.Port.Name.RATIS)); + read.getRatisPort()); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 05bebdd1b90..fc107414d40 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -510,12 +510,9 @@ static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, // This method has to be removed once we move scm/TestUtils.java // from server-scm project to container-service or to common project. 
private static DatanodeDetails randomDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index 219645c8edc..2e1e0eafd01 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.UUID; @@ -43,6 +44,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.GB; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; @@ -270,12 +273,9 @@ private CloseContainerCommand forceCloseWithoutPipeline() { */ private static DatanodeDetails randomDatanodeDetails() { String ipAddress = "127.0.0.1"; - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") @@ -292,4 +292,28 @@ private void waitTillFinishExecution( GenericTestUtils.waitFor(() -> closeHandler.getQueuedCount() <= 0, 10, 3000); } + + @Test + public void testThreadPoolPoolSize() { + assertEquals(1, subject.getThreadPoolMaxPoolSize()); + assertEquals(0, subject.getThreadPoolActivePoolSize()); + + CloseContainerCommandHandler closeContainerCommandHandler = + new CloseContainerCommandHandler(10, 10, ""); + closeContainerCommandHandler.handle(new CloseContainerCommand( + CONTAINER_ID + 1, PipelineID.randomId()), + ozoneContainer, context, null); + closeContainerCommandHandler.handle(new CloseContainerCommand( + CONTAINER_ID + 2, 
PipelineID.randomId()), + ozoneContainer, context, null); + closeContainerCommandHandler.handle(new CloseContainerCommand( + CONTAINER_ID + 3, PipelineID.randomId()), + ozoneContainer, context, null); + closeContainerCommandHandler.handle(new CloseContainerCommand( + CONTAINER_ID + 4, PipelineID.randomId()), + ozoneContainer, context, null); + assertEquals(10, closeContainerCommandHandler.getThreadPoolMaxPoolSize()); + assertTrue(closeContainerCommandHandler.getThreadPoolActivePoolSize() > 0); + } + } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java index 49c34828fbd..5ee31b97fd6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java @@ -19,6 +19,14 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.LinkedBlockingQueue; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -32,7 +40,6 @@ import java.time.ZoneId; import java.util.OptionalLong; -import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -63,8 +70,14 @@ public void setup() { } @Test - public void testExpiredCommandsAreNotProcessed() throws IOException { - DeleteContainerCommandHandler handler = createSubject(clock, 1000); + public void testExpiredCommandsAreNotProcessed() + throws IOException, InterruptedException { + CountDownLatch latch1 = new CountDownLatch(1); + ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); + ThreadPoolWithLockExecutor executor = new ThreadPoolWithLockExecutor( + threadFactory, latch1); + DeleteContainerCommandHandler handler = new DeleteContainerCommandHandler( + clock, executor, 100); DeleteContainerCommand command1 = new DeleteContainerCommand(1L); command1.setDeadline(clock.millis() + 10000); @@ -75,9 +88,14 @@ public void testExpiredCommandsAreNotProcessed() throws IOException { clock.fastForward(15000); handler.handle(command1, ozoneContainer, null, null); + latch1.await(); assertEquals(1, handler.getTimeoutCount()); + CountDownLatch latch2 = new CountDownLatch(2); + executor.setLatch(latch2); handler.handle(command2, ozoneContainer, null, null); handler.handle(command3, ozoneContainer, null, null); + latch2.await(); + assertEquals(1, handler.getTimeoutCount()); assertEquals(3, handler.getInvocationCount()); verify(controller, times(0)) @@ -89,7 +107,8 @@ public void testExpiredCommandsAreNotProcessed() throws IOException { } @Test - public void 
testCommandForCurrentTermIsExecuted() throws IOException { + public void testCommandForCurrentTermIsExecuted() + throws IOException, InterruptedException { // GIVEN DeleteContainerCommand command = new DeleteContainerCommand(1L); command.setTerm(1); @@ -97,10 +116,17 @@ public void testCommandForCurrentTermIsExecuted() throws IOException { when(context.getTermOfLeaderSCM()) .thenReturn(OptionalLong.of(command.getTerm())); - DeleteContainerCommandHandler subject = createSubject(); + TestClock testClock = new TestClock(Instant.now(), ZoneId.systemDefault()); + CountDownLatch latch = new CountDownLatch(1); + ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); + ThreadPoolWithLockExecutor executor = new ThreadPoolWithLockExecutor( + threadFactory, latch); + DeleteContainerCommandHandler subject = new DeleteContainerCommandHandler( + testClock, executor, 100); // WHEN subject.handle(command, ozoneContainer, context, null); + latch.await(); // THEN verify(controller, times(1)) @@ -163,8 +189,10 @@ private static DeleteContainerCommandHandler createSubject() { private static DeleteContainerCommandHandler createSubject( TestClock clock, int queueSize) { - return new DeleteContainerCommandHandler(clock, - newDirectExecutorService(), queueSize); + ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); + ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors. + newFixedThreadPool(1, threadFactory); + return new DeleteContainerCommandHandler(clock, executor, queueSize); } private static DeleteContainerCommandHandler createSubjectWithPoolSize( @@ -172,4 +200,21 @@ private static DeleteContainerCommandHandler createSubjectWithPoolSize( return new DeleteContainerCommandHandler(1, clock, queueSize, ""); } + static class ThreadPoolWithLockExecutor extends ThreadPoolExecutor { + private CountDownLatch countDownLatch; + ThreadPoolWithLockExecutor(ThreadFactory threadFactory, CountDownLatch latch) { + super(1, 1, 0, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), threadFactory); + this.countDownLatch = latch; + } + + void setLatch(CountDownLatch latch) { + this.countDownLatch = latch; + } + + @Override + protected void afterExecute(Runnable r, Throwable t) { + countDownLatch.countDown(); + } + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java new file mode 100644 index 00000000000..7e6c7608180 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import com.google.protobuf.ByteString; +import com.google.protobuf.Proto2Utils; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; +import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; + +/** + * Test cases to verify {@link ReconstructECContainersCommandHandler}. 
+ */ +public class TestReconstructECContainersCommandHandler { + private OzoneConfiguration conf; + private ReplicationSupervisor supervisor; + private ECReconstructionCoordinator coordinator; + private OzoneContainer ozoneContainer; + private StateContext stateContext; + private SCMConnectionManager connectionManager; + + @BeforeEach + public void setUp() { + supervisor = mock(ReplicationSupervisor.class); + coordinator = mock(ECReconstructionCoordinator.class); + conf = new OzoneConfiguration(); + ozoneContainer = mock(OzoneContainer.class); + connectionManager = mock(SCMConnectionManager.class); + stateContext = mock(StateContext.class); + } + + @Test + public void testMetrics() { + ReconstructECContainersCommandHandler commandHandler = + new ReconstructECContainersCommandHandler(conf, supervisor, coordinator); + doNothing().when(supervisor).addTask(any()); + Map handlerMap = new HashMap<>(); + handlerMap.put(commandHandler.getCommandType(), commandHandler); + CommandHandlerMetrics metrics = CommandHandlerMetrics.create(handlerMap); + try { + byte[] missingIndexes = {1, 2}; + ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(missingIndexes); + ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2); + List dnDetails = getDNDetails(5); + List sources = + dnDetails.stream().map(a -> new ReconstructECContainersCommand + .DatanodeDetailsAndReplicaIndex(a, dnDetails.indexOf(a))) + .collect(Collectors.toList()); + List targets = getDNDetails(2); + ReconstructECContainersCommand reconstructECContainersCommand = + new ReconstructECContainersCommand(1L, sources, targets, + missingContainerIndexes, ecReplicationConfig); + + commandHandler.handle(reconstructECContainersCommand, ozoneContainer, + stateContext, connectionManager); + String metricsName = "ECReconstructions"; + assertEquals(commandHandler.getMetricsName(), metricsName); + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 1); + + commandHandler.handle(new ReconstructECContainersCommand(2L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(3L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(4L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(5L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(6L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(5L); + when(supervisor.getReplicationRequestTotalTime(metricsName)).thenReturn(10L); + when(supervisor.getReplicationRequestAvgTime(metricsName)).thenReturn(2L); + when(supervisor.getReplicationQueuedCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 5); + assertEquals(commandHandler.getQueuedCount(), 1); + assertEquals(commandHandler.getTotalRunTime(), 10); + assertEquals(commandHandler.getAverageRunTime(), 2); + + MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl(); + 
metrics.getMetrics(metricsCollector, true); + assertEquals(1, metricsCollector.getRecords().size()); + } finally { + metrics.unRegister(); + } + } + + private List getDNDetails(int numDns) { + List dns = new ArrayList<>(); + for (int i = 0; i < numDns; i++) { + dns.add(MockDatanodeDetails.randomDatanodeDetails()); + } + return dns; + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java new file mode 100644 index 00000000000..9de00877e5b --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.container.replication.ContainerReplicator; +import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.doNothing; + +/** + * Test cases to verify {@link ReplicateContainerCommandHandler}. 
+ */ +public class TestReplicateContainerCommandHandler { + private OzoneConfiguration conf; + private ReplicationSupervisor supervisor; + private ContainerReplicator downloadReplicator; + private ContainerReplicator pushReplicator; + private OzoneContainer ozoneContainer; + private StateContext stateContext; + private SCMConnectionManager connectionManager; + + @BeforeEach + public void setUp() { + conf = new OzoneConfiguration(); + supervisor = mock(ReplicationSupervisor.class); + downloadReplicator = mock(ContainerReplicator.class); + pushReplicator = mock(ContainerReplicator.class); + ozoneContainer = mock(OzoneContainer.class); + connectionManager = mock(SCMConnectionManager.class); + stateContext = mock(StateContext.class); + } + + @Test + public void testMetrics() { + ReplicateContainerCommandHandler commandHandler = + new ReplicateContainerCommandHandler(conf, supervisor, + downloadReplicator, pushReplicator); + Map handlerMap = new HashMap<>(); + handlerMap.put(commandHandler.getCommandType(), commandHandler); + CommandHandlerMetrics metrics = CommandHandlerMetrics.create(handlerMap); + try { + doNothing().when(supervisor).addTask(any()); + DatanodeDetails source = MockDatanodeDetails.randomDatanodeDetails(); + DatanodeDetails target = MockDatanodeDetails.randomDatanodeDetails(); + List sourceList = new ArrayList<>(); + sourceList.add(source); + + ReplicateContainerCommand command = ReplicateContainerCommand.fromSources( + 1, sourceList); + commandHandler.handle(command, ozoneContainer, stateContext, connectionManager); + String metricsName = "ContainerReplications"; + assertEquals(commandHandler.getMetricsName(), metricsName); + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 1); + + commandHandler.handle(ReplicateContainerCommand.fromSources(2, sourceList), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.fromSources(3, sourceList), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.toTarget(4, target), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.toTarget(5, target), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.fromSources(6, sourceList), + ozoneContainer, stateContext, connectionManager); + + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(5L); + when(supervisor.getReplicationRequestTotalTime(metricsName)).thenReturn(10L); + when(supervisor.getReplicationRequestAvgTime(metricsName)).thenReturn(3L); + when(supervisor.getReplicationQueuedCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 5); + assertEquals(commandHandler.getQueuedCount(), 1); + assertEquals(commandHandler.getTotalRunTime(), 10); + assertEquals(commandHandler.getAverageRunTime(), 3); + + MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl(); + metrics.getMetrics(metricsCollector, true); + assertEquals(1, metricsCollector.getRecords().size()); + } finally { + metrics.unRegister(); + } + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java index 5e2dd0c75c9..5e0a31944f7 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -37,6 +38,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertNotEquals; /** @@ -166,6 +168,16 @@ public void testInvalidConfig() throws Exception { assertEquals(getExpectedDefaultReserved(hddsVolume2), reservedFromVolume2); } + @Test + public void testInvalidConfigThrowsException() { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, "15GB"); + + assertThrows(ConfigurationException.class, + () -> volumeBuilder.conf(conf).build(), + "Reserved space should be configured in a pair"); + } + @Test public void testPathsCanonicalized() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java index c8934bab416..1df886098ab 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.container.common.volume; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.http.PrometheusMetricsSink; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; @@ -30,6 +31,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY; /** * Test PrometheusMetricSink regarding VolumeIOStats. 
@@ -54,11 +56,14 @@ public void tearDown() { @Test public void testMultipleVolumeIOMetricsExist() throws IOException { + OzoneConfiguration conf = new OzoneConfiguration(); + int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); + //GIVEN VolumeIOStats volumeIOStats1 = new VolumeIOStats("VolumeIOStat1", - "vol1/dir"); + "vol1/dir", intervals); VolumeIOStats volumeIOStat2 = new VolumeIOStats("VolumeIOStat2", - "vol2/dir"); + "vol2/dir", intervals); //WHEN String writtenMetrics = publishMetricsAndGetOutput(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 55df5f43b6b..0b24161aadb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -340,6 +340,7 @@ public void testVolumeFailure() throws IOException { conSet.handleVolumeFailures(stateContext); // ContainerID1 should be removed belonging to failed volume assertNull(conSet.getContainer(containerID1)); + assertTrue(conSet.getMissingContainerSet().contains(containerID1)); // ContainerID should exist belonging to normal volume assertNotNull(conSet.getContainer(containerID)); expectedReportCount.put( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index e1a3de30ddf..584db675d93 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -689,7 +689,7 @@ public void testContainerRocksDB(ContainerTestVersionInfo versionInfo) try (DBHandle db = BlockUtils.getDB(keyValueContainerData, CONF)) { RDBStore store = (RDBStore) db.getStore().getStore(); - long defaultCacheSize = 64 * OzoneConsts.MB; + long defaultCacheSize = OzoneConsts.GB; long cacheSize = Long.parseLong(store .getProperty("rocksdb.block-cache-capacity")); assertEquals(defaultCacheSize, cacheSize); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 30a8a9bcbce..0ff2aaa22b5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -83,6 +83,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mockito; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; @@ -147,7 +148,13 @@ public void testHandlerCommandHandling() throws Exception { .build(); KeyValueContainer container = mock(KeyValueContainer.class); - + KeyValueContainerData containerData = mock(KeyValueContainerData.class); + Mockito.when(container.getContainerData()).thenReturn(containerData); + 
Mockito.when(containerData.getReplicaIndex()).thenReturn(1); + ContainerProtos.ContainerCommandResponseProto responseProto = KeyValueHandler.dispatchRequest(handler, + createContainerRequest, container, null); + assertEquals(ContainerProtos.Result.INVALID_ARGUMENT, responseProto.getResult()); + Mockito.when(handler.getDatanodeId()).thenReturn(DATANODE_UUID); KeyValueHandler .dispatchRequest(handler, createContainerRequest, container, null); verify(handler, times(0)).handleListBlock( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index af0c430c86d..5f1a93ef2fb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -133,8 +133,7 @@ public void testGetBlockWithReplicaIndexMismatch(ClientVersion clientVersion, in handler.handleGetBlock( getDummyCommandRequestProto(clientVersion, ContainerProtos.Type.GetBlock, rid), container); - assertEquals((replicaIndex > 0 && rid != replicaIndex && clientVersion.toProtoValue() >= - ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue()) ? + assertEquals((replicaIndex > 0 && rid != 0 && rid != replicaIndex) ? ContainerProtos.Result.CONTAINER_NOT_FOUND : UNKNOWN_BCSID, response.getResult()); } @@ -176,8 +175,7 @@ public void testReadChunkWithReplicaIndexMismatch(ClientVersion clientVersion, i ContainerProtos.ContainerCommandResponseProto response = handler.handleReadChunk(getDummyCommandRequestProto(clientVersion, ContainerProtos.Type.ReadChunk, rid), container, null); - assertEquals((replicaIndex > 0 && rid != replicaIndex && - clientVersion.toProtoValue() >= ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue()) ? + assertEquals((replicaIndex > 0 && rid != 0 && rid != replicaIndex) ? 
ContainerProtos.Result.CONTAINER_NOT_FOUND : UNKNOWN_BCSID, response.getResult()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java index d04f3a5167f..5454f9e8a9b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; +import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager; import org.apache.ozone.test.GenericTestUtils; import org.apache.commons.io.FileUtils; @@ -70,6 +71,7 @@ class TestChunkUtils { private static final int BUFFER_CAPACITY = 1 << 20; private static final int MAPPED_BUFFER_THRESHOLD = 32 << 10; private static final Random RANDOM = new Random(); + private static final MappedBufferManager MAPPED_BUFFER_MANAGER = new MappedBufferManager(100); @TempDir private File tempDir; @@ -78,7 +80,7 @@ static ChunkBuffer readData(File file, long off, long len) throws StorageContainerException { LOG.info("off={}, len={}", off, len); return ChunkUtils.readData(len, BUFFER_CAPACITY, file, off, null, - MAPPED_BUFFER_THRESHOLD); + MAPPED_BUFFER_THRESHOLD, true, MAPPED_BUFFER_MANAGER); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java index 0c373cb0dbf..d9b95f199dd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java @@ -34,8 +34,13 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.BufferedReader; import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; import java.nio.Buffer; import java.nio.ByteBuffer; import java.util.UUID; @@ -53,6 +58,8 @@ * Helpers for ChunkManager implementation tests. */ public abstract class AbstractTestChunkManager { + private static final Logger LOG = + LoggerFactory.getLogger(AbstractTestChunkManager.class); private HddsVolume hddsVolume; private KeyValueContainerData keyValueContainerData; @@ -128,6 +135,55 @@ protected void checkChunkFileCount(int expected) { assertEquals(expected, files.length); } + /** + * Helper method to check if a file is in use. 
+ */ + public static boolean isFileNotInUse(String filePath) { + try { + Process process = new ProcessBuilder("fuser", filePath).start(); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), UTF_8))) { + String output = reader.readLine(); // If fuser returns no output, the file is not in use + if (output == null) { + return true; + } + LOG.debug("File is in use: {}", filePath); + return false; + } finally { + process.destroy(); + } + } catch (IOException e) { + LOG.warn("Failed to check if file is in use: {}", filePath, e); + return false; // On failure, assume the file is in use + } + } + + protected boolean checkChunkFilesClosed() { + return checkChunkFilesClosed(keyValueContainerData.getChunksPath()); + } + + /** + * check that all files under chunk path are closed. + */ + public static boolean checkChunkFilesClosed(String path) { + //As in Setup, we try to create container, these paths should exist. + assertNotNull(path); + + File dir = new File(path); + assertTrue(dir.exists()); + + File[] files = dir.listFiles(); + assertNotNull(files); + for (File file : files) { + assertTrue(file.exists()); + assertTrue(file.isFile()); + // check that the file is closed. + if (!isFileNotInUse(file.getAbsolutePath())) { + return false; + } + } + return true; + } + protected void checkWriteIOStats(long length, long opCount) { VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats(); assertEquals(length, volumeIOStats.getWriteBytes()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java index 47d24874749..d4a12f577e9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.junit.jupiter.api.Test; +import org.mockito.Mockito; import java.io.File; import java.io.IOException; @@ -39,7 +40,9 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.when; /** * Common test cases for ChunkManager implementation tests. 
@@ -222,4 +225,26 @@ public void testWriteAndReadChunkMultipleTimes() throws Exception { checkReadIOStats(len * count, count); } + @Test + public void testFinishWrite() throws Exception { + // GIVEN + ChunkManager chunkManager = createTestSubject(); + checkChunkFileCount(0); + checkWriteIOStats(0, 0); + + chunkManager.writeChunk(getKeyValueContainer(), getBlockID(), + getChunkInfo(), getData(), + WRITE_STAGE); + + BlockData blockData = Mockito.mock(BlockData.class); + when(blockData.getBlockID()).thenReturn(getBlockID()); + + chunkManager.finishWriteChunks(getKeyValueContainer(), blockData); + assertTrue(checkChunkFilesClosed()); + + // THEN + checkChunkFileCount(1); + checkWriteIOStats(getChunkInfo().getLen(), 1); + } + } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java new file mode 100644 index 00000000000..22406975986 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestMappedBufferManager.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.keyvalue.impl; + +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test for MappedBufferManager. + */ +public class TestMappedBufferManager { + + @Test + public void testComputeIfAbsent() { + MappedBufferManager manager = new MappedBufferManager(100); + String file = "/CID-fd49f4a7-670d-43c5-a177-8ac03aafceb2/current/containerDir0/2/chunks/113750153625600065.block"; + long position = 0; + int size = 1024; + ByteBuffer buffer1 = ByteBuffer.allocate(size); + ByteBuffer buffer2 = ByteBuffer.allocate(size + 1); + ByteBuffer byteBuffer1 = manager.computeIfAbsent(file, position, size, () -> buffer1); + assertEquals(buffer1, byteBuffer1); + // buffer should be reused + String file2 = "/CID-fd49f4a7-670d-43c5-a177-8ac03aafceb2/current/containerDir0/2/chunks/113750153625600065.block"; + ByteBuffer byteBuffer2 = manager.computeIfAbsent(file2, position, size, () -> buffer2); + assertEquals(buffer1, byteBuffer2); + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 07804c2a20b..2f2cbc81e90 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -20,6 +20,7 @@ import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -51,7 +52,9 @@ import java.io.File; import java.nio.file.Files; import java.nio.file.Path; +import java.util.HashSet; import java.util.Random; +import java.util.Set; import java.util.UUID; import java.util.HashMap; import java.util.List; @@ -122,7 +125,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) volume.format(clusterId); commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0)); } - + List containerDatas = new ArrayList<>(); // Add containers to disk int numTestContainers = 10; for (int i = 0; i < numTestContainers; i++) { @@ -136,6 +139,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) layout, maxCap, UUID.randomUUID().toString(), datanodeDetails.getUuidString()); + containerDatas.add(keyValueContainerData); keyValueContainer = new KeyValueContainer( keyValueContainerData, conf); keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId); @@ -156,8 +160,22 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) ozoneContainer.buildContainerSet(); ContainerSet containerset = ozoneContainer.getContainerSet(); assertEquals(numTestContainers, containerset.containerCount()); - verifyCommittedSpace(ozoneContainer); + Set missingContainers = new HashSet<>(); + for (int i = 0; i < numTestContainers; i++) { + if (i % 2 == 0) { + missingContainers.add(containerDatas.get(i).getContainerID()); + 
FileUtils.deleteDirectory(new File(containerDatas.get(i).getContainerPath())); + } + } + ozoneContainer.stop(); + ozoneContainer = ContainerTestUtils.getOzoneContainer(datanodeDetails, conf); + ozoneContainer.buildContainerSet(); + containerset = ozoneContainer.getContainerSet(); + assertEquals(numTestContainers / 2, containerset.containerCount()); + assertEquals(numTestContainers / 2 + numTestContainers % 2, containerset.getMissingContainerSet().size()); + assertEquals(missingContainers, containerset.getMissingContainerSet()); + ozoneContainer.stop(); } @ContainerTestVersionInfo.ContainerTest @@ -300,12 +318,9 @@ private DatanodeDetails createDatanodeDetails() { random.nextInt(256) + "." + random.nextInt(256) + "." + random .nextInt(256) + "." + random.nextInt(256); - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index b8c43460ba3..c1cf59f0fdd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -82,17 +82,15 @@ class TestGrpcReplicationService { @BeforeEach public void setUp() throws Exception { - init(false); + init(); } - public void init(boolean isZeroCopy) throws Exception { + public void init() throws Exception { conf = new OzoneConfiguration(); ReplicationServer.ReplicationConfig replicationConfig = conf.getObject(ReplicationServer.ReplicationConfig.class); - replicationConfig.setZeroCopyEnable(isZeroCopy); - SecurityConfig secConf = new SecurityConfig(conf); ContainerSet containerSet = new ContainerSet(1000); @@ -103,11 +101,9 @@ public void init(boolean isZeroCopy) throws Exception { .setPersistedOpState(HddsProtos.NodeOperationalState.IN_SERVICE) .setPersistedOpStateExpiry(0); DatanodeDetails.Port containerPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); + DatanodeDetails.newStandalonePort(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); DatanodeDetails.Port ratisPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, - OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + DatanodeDetails.newRatisPort(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); DatanodeDetails.Port replicationPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.REPLICATION, replicationConfig.getPort()); @@ -226,7 +222,7 @@ public void copyData(long containerId, OutputStream destination, }; ContainerImporter importer = mock(ContainerImporter.class); GrpcReplicationService subject = - new GrpcReplicationService(source, importer, false); + new 
GrpcReplicationService(source, importer); CopyContainerRequestProto request = CopyContainerRequestProto.newBuilder() .setContainerID(1) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 1f69db78d62..315e0c0253b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -27,6 +27,7 @@ import java.time.Instant; import java.time.ZoneId; import java.util.List; +import java.util.SortedMap; import java.util.UUID; import java.util.concurrent.AbstractExecutorService; import java.util.concurrent.CountDownLatch; @@ -46,6 +47,8 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; +import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; @@ -55,7 +58,9 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCommandInfo; +import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinatorTask; +import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -82,6 +87,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; import static org.mockito.Mockito.any; @@ -109,6 +115,8 @@ public class TestReplicationSupervisor { }; private final AtomicReference replicatorRef = new AtomicReference<>(); + private final AtomicReference ecReplicatorRef = + new AtomicReference<>(); private ContainerSet set; @@ -135,6 +143,7 @@ public void setUp() throws Exception { @AfterEach public void cleanup() { replicatorRef.set(null); + ecReplicatorRef.set(null); } @ContainerLayoutTestInfo.ContainerTest @@ -394,6 +403,116 @@ public void taskWithObsoleteTermIsDropped(ContainerLayoutVersion layout) { assertEquals(0, supervisor.getReplicationSuccessCount()); } + @ContainerLayoutTestInfo.ContainerTest + public void testMultipleReplication(ContainerLayoutVersion layout, + @TempDir File tempFile) 
throws IOException { + this.layoutVersion = layout; + OzoneConfiguration conf = new OzoneConfiguration(); + // GIVEN + ReplicationSupervisor replicationSupervisor = + supervisorWithReplicator(FakeReplicator::new); + ReplicationSupervisor ecReconstructionSupervisor = supervisorWithECReconstruction(); + ReplicationSupervisorMetrics replicationMetrics = + ReplicationSupervisorMetrics.create(replicationSupervisor); + ReplicationSupervisorMetrics ecReconstructionMetrics = + ReplicationSupervisorMetrics.create(ecReconstructionSupervisor); + try { + //WHEN + replicationSupervisor.addTask(createTask(1L)); + ecReconstructionSupervisor.addTask(createECTaskWithCoordinator(2L)); + replicationSupervisor.addTask(createTask(1L)); + replicationSupervisor.addTask(createTask(3L)); + ecReconstructionSupervisor.addTask(createECTaskWithCoordinator(4L)); + + SimpleContainerDownloader moc = mock(SimpleContainerDownloader.class); + Path res = Paths.get("file:/tmp/no-such-file"); + when(moc.getContainerDataFromReplicas(anyLong(), anyList(), + any(Path.class), any())).thenReturn(res); + + final String testDir = tempFile.getPath(); + MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); + when(volumeSet.getVolumesList()).thenReturn(singletonList( + new HddsVolume.Builder(testDir).conf(conf).build())); + ContainerController mockedCC = mock(ContainerController.class); + ContainerImporter importer = new ContainerImporter(conf, set, mockedCC, volumeSet); + ContainerReplicator replicator = new DownloadAndImportReplicator( + conf, set, importer, moc); + replicatorRef.set(replicator); + replicationSupervisor.addTask(createTask(5L)); + + ReplicateContainerCommand cmd1 = createCommand(6L); + cmd1.setDeadline(clock.millis() + 10000); + ReplicationTask task1 = new ReplicationTask(cmd1, replicatorRef.get()); + clock.fastForward(15000); + replicationSupervisor.addTask(task1); + + ReconstructECContainersCommand cmd2 = createReconstructionCmd(7L); + cmd2.setDeadline(clock.millis() + 10000); + ECReconstructionCoordinatorTask task2 = new ECReconstructionCoordinatorTask( + ecReplicatorRef.get(), new ECReconstructionCommandInfo(cmd2)); + clock.fastForward(15000); + ecReconstructionSupervisor.addTask(task2); + ecReconstructionSupervisor.addTask(createECTask(8L)); + ecReconstructionSupervisor.addTask(createECTask(9L)); + + //THEN + assertEquals(2, replicationSupervisor.getReplicationSuccessCount()); + assertEquals(2, replicationSupervisor.getReplicationSuccessCount( + task1.getMetricName())); + assertEquals(1, replicationSupervisor.getReplicationFailureCount()); + assertEquals(1, replicationSupervisor.getReplicationFailureCount( + task1.getMetricName())); + assertEquals(1, replicationSupervisor.getReplicationSkippedCount()); + assertEquals(1, replicationSupervisor.getReplicationSkippedCount( + task1.getMetricName())); + assertEquals(1, replicationSupervisor.getReplicationTimeoutCount()); + assertEquals(1, replicationSupervisor.getReplicationTimeoutCount( + task1.getMetricName())); + assertEquals(5, replicationSupervisor.getReplicationRequestCount()); + assertEquals(5, replicationSupervisor.getReplicationRequestCount( + task1.getMetricName())); + assertEquals(0, replicationSupervisor.getReplicationRequestCount( + task2.getMetricName())); + + assertEquals(2, ecReconstructionSupervisor.getReplicationSuccessCount()); + assertEquals(2, ecReconstructionSupervisor.getReplicationSuccessCount( + task2.getMetricName())); + assertEquals(1, ecReconstructionSupervisor.getReplicationTimeoutCount()); + assertEquals(1, 
ecReconstructionSupervisor.getReplicationTimeoutCount( + task2.getMetricName())); + assertEquals(2, ecReconstructionSupervisor.getReplicationFailureCount()); + assertEquals(2, ecReconstructionSupervisor.getReplicationFailureCount( + task2.getMetricName())); + assertEquals(5, ecReconstructionSupervisor.getReplicationRequestCount()); + assertEquals(5, ecReconstructionSupervisor.getReplicationRequestCount( + task2.getMetricName())); + assertEquals(0, ecReconstructionSupervisor.getReplicationRequestCount( + task1.getMetricName())); + + assertTrue(replicationSupervisor.getReplicationRequestTotalTime( + task1.getMetricName()) > 0); + assertTrue(ecReconstructionSupervisor.getReplicationRequestTotalTime( + task2.getMetricName()) > 0); + assertTrue(replicationSupervisor.getReplicationRequestAvgTime( + task1.getMetricName()) > 0); + assertTrue(ecReconstructionSupervisor.getReplicationRequestAvgTime( + task2.getMetricName()) > 0); + + MetricsCollectorImpl replicationMetricsCollector = new MetricsCollectorImpl(); + replicationMetrics.getMetrics(replicationMetricsCollector, true); + assertEquals(1, replicationMetricsCollector.getRecords().size()); + + MetricsCollectorImpl ecReconstructionMetricsCollector = new MetricsCollectorImpl(); + ecReconstructionMetrics.getMetrics(ecReconstructionMetricsCollector, true); + assertEquals(1, ecReconstructionMetricsCollector.getRecords().size()); + } finally { + replicationMetrics.unRegister(); + ecReconstructionMetrics.unRegister(); + replicationSupervisor.stop(); + ecReconstructionSupervisor.stop(); + } + } + @ContainerLayoutTestInfo.ContainerTest public void testPriorityOrdering(ContainerLayoutVersion layout) throws InterruptedException { @@ -476,6 +595,16 @@ private static class BlockingTask extends AbstractReplicationTask { this.waitForCompleteLatch = waitForCompletion; } + @Override + protected String getMetricName() { + return "Blockings"; + } + + @Override + protected String getMetricDescriptionSegment() { + return "blockings"; + } + @Override public void runTask() { runningLatch.countDown(); @@ -502,6 +631,16 @@ private static class OrderedTask extends AbstractReplicationTask { setPriority(priority); } + @Override + protected String getMetricName() { + return "Ordereds"; + } + + @Override + protected String getMetricDescriptionSegment() { + return "ordereds"; + } + @Override public void runTask() { completeList.add(name); @@ -531,6 +670,22 @@ private ReplicationSupervisor supervisorWith( return supervisor; } + private ReplicationSupervisor supervisorWithECReconstruction() throws IOException { + ConfigurationSource conf = new OzoneConfiguration(); + ExecutorService executor = newDirectExecutorService(); + ReplicationServer.ReplicationConfig repConf = + conf.getObject(ReplicationServer.ReplicationConfig.class); + ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder() + .stateContext(context).replicationConfig(repConf).executor(executor) + .clock(clock).build(); + + FakeECReconstructionCoordinator coordinator = new FakeECReconstructionCoordinator( + new OzoneConfiguration(), null, null, context, + ECReconstructionMetrics.create(), "", supervisor); + ecReplicatorRef.set(coordinator); + return supervisor; + } + private ReplicationTask createTask(long containerId) { ReplicateContainerCommand cmd = createCommand(containerId); return new ReplicationTask(cmd, replicatorRef.get()); @@ -538,7 +693,13 @@ private ReplicationTask createTask(long containerId) { private ECReconstructionCoordinatorTask createECTask(long containerId) { return new 
ECReconstructionCoordinatorTask(null, - createReconstructionCmd(containerId)); + createReconstructionCmdInfo(containerId)); + } + + private ECReconstructionCoordinatorTask createECTaskWithCoordinator(long containerId) { + ECReconstructionCommandInfo ecReconstructionCommandInfo = createReconstructionCmdInfo(containerId); + return new ECReconstructionCoordinatorTask(ecReplicatorRef.get(), + ecReconstructionCommandInfo); } private static ReplicateContainerCommand createCommand(long containerId) { @@ -548,18 +709,20 @@ private static ReplicateContainerCommand createCommand(long containerId) { return cmd; } - private static ECReconstructionCommandInfo createReconstructionCmd( + private static ECReconstructionCommandInfo createReconstructionCmdInfo( long containerId) { - List sources - = new ArrayList<>(); - sources.add(new ReconstructECContainersCommand - .DatanodeDetailsAndReplicaIndex( - MockDatanodeDetails.randomDatanodeDetails(), 1)); - sources.add(new ReconstructECContainersCommand - .DatanodeDetailsAndReplicaIndex( + return new ECReconstructionCommandInfo(createReconstructionCmd(containerId)); + } + + private static ReconstructECContainersCommand createReconstructionCmd( + long containerId) { + List sources = + new ArrayList<>(); + sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( + MockDatanodeDetails.randomDatanodeDetails(), 1)); + sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( MockDatanodeDetails.randomDatanodeDetails(), 2)); - sources.add(new ReconstructECContainersCommand - .DatanodeDetailsAndReplicaIndex( + sources.add(new ReconstructECContainersCommand.DatanodeDetailsAndReplicaIndex( MockDatanodeDetails.randomDatanodeDetails(), 3)); byte[] missingIndexes = new byte[1]; @@ -567,14 +730,44 @@ private static ECReconstructionCommandInfo createReconstructionCmd( List target = singletonList( MockDatanodeDetails.randomDatanodeDetails()); - ReconstructECContainersCommand cmd = - new ReconstructECContainersCommand(containerId, - sources, - target, - Proto2Utils.unsafeByteString(missingIndexes), - new ECReplicationConfig(3, 2)); - - return new ECReconstructionCommandInfo(cmd); + ReconstructECContainersCommand cmd = new ReconstructECContainersCommand(containerId, sources, target, + Proto2Utils.unsafeByteString(missingIndexes), + new ECReplicationConfig(3, 2)); + cmd.setTerm(CURRENT_TERM); + return cmd; + } + + /** + * A fake coordinator that simulates successful reconstruction of ec containers. 
+ */ + private class FakeECReconstructionCoordinator extends ECReconstructionCoordinator { + + private final OzoneConfiguration conf = new OzoneConfiguration(); + private final ReplicationSupervisor supervisor; + + FakeECReconstructionCoordinator(ConfigurationSource conf, + CertificateClient certificateClient, SecretKeySignerClient secretKeyClient, + StateContext context, ECReconstructionMetrics metrics, String threadNamePrefix, + ReplicationSupervisor supervisor) + throws IOException { + super(conf, certificateClient, secretKeyClient, context, metrics, threadNamePrefix); + this.supervisor = supervisor; + } + + @Override + public void reconstructECContainerGroup(long containerID, + ECReplicationConfig repConfig, SortedMap sourceNodeMap, + SortedMap targetNodeMap) { + assertEquals(1, supervisor.getTotalInFlightReplications()); + + KeyValueContainerData kvcd = new KeyValueContainerData( + containerID, layoutVersion, 100L, + UUID.randomUUID().toString(), UUID.randomUUID().toString()); + KeyValueContainer kvc = new KeyValueContainer(kvcd, conf); + assertDoesNotThrow(() -> { + set.addContainer(kvc); + }); + } } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java index e9fef6ecfd6..55bddf2e99a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java @@ -72,12 +72,9 @@ public void testStartupSlvLessThanMlv() throws Exception { } private DatanodeDetails getNewDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); return DatanodeDetails.newBuilder() .setUuid(UUID.randomUUID()) .setHostName("localhost") diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java index f4e4ec6a253..519a24a2a5c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java @@ -26,10 +26,12 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -53,11 +55,8 @@ public void testExceptionIfSourceAndMissingNotSameLength() { @Test public void protobufConversion() { - final ByteString 
missingContainerIndexes = Proto2Utils.unsafeByteString(new byte[]{1, 2}); - List srcNodesIndexes = new ArrayList<>(); - for (int i = 0; i < srcNodesIndexes.size(); i++) { - srcNodesIndexes.add(i + 1L); - } + byte[] missingIndexes = {1, 2}; + final ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(missingIndexes); ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2); final List dnDetails = getDNDetails(5); @@ -70,6 +69,10 @@ public void protobufConversion() { ReconstructECContainersCommand reconstructECContainersCommand = new ReconstructECContainersCommand(1L, sources, targets, missingContainerIndexes, ecReplicationConfig); + + assertThat(reconstructECContainersCommand.toString()) + .contains("missingIndexes: " + Arrays.toString(missingIndexes)); + StorageContainerDatanodeProtocolProtos.ReconstructECContainersCommandProto proto = reconstructECContainersCommand.getProto(); diff --git a/hadoop-hdds/crypto-api/pom.xml b/hadoop-hdds/crypto-api/pom.xml index db19cc4f341..ca54b3de9f2 100644 --- a/hadoop-hdds/crypto-api/pom.xml +++ b/hadoop-hdds/crypto-api/pom.xml @@ -19,11 +19,11 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-crypto-api - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store cryptographic functions Apache Ozone HDDS Crypto diff --git a/hadoop-hdds/crypto-default/pom.xml b/hadoop-hdds/crypto-default/pom.xml index c586f91712b..6024c3e2ddf 100644 --- a/hadoop-hdds/crypto-default/pom.xml +++ b/hadoop-hdds/crypto-default/pom.xml @@ -19,11 +19,11 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-crypto-default - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Default implementation of Apache Ozone Distributed Data Store's cryptographic functions Apache Ozone HDDS Crypto - Default diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml index 3a69c793c26..288085ef948 100644 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml @@ -88,6 +88,7 @@ + diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md index 47c09a798fc..cf246712f68 100644 --- a/hadoop-hdds/docs/content/concept/Datanodes.md +++ b/hadoop-hdds/docs/content/concept/Datanodes.md @@ -76,3 +76,15 @@ blocks that get reported. That is a 40x reduction in the block reports. This extra indirection helps tremendously with scaling Ozone. SCM has far less block data to process and the namespace service (Ozone Manager) as a different service are critical to scaling Ozone. + + +## Notable configurations + +key | default |

    description
    +----|---------|------------ +dfs.container.ratis.datanode.storage.dir | none | This directory is used for storing Ratis metadata like logs. +ozone.scm.datanode.id.dir | none | The path that datanodes will use to store the datanode ID. +hdds.datanode.dir | none | Determines where HDDS data will be stored on the local filesystem. +hdds.datanode.dir.du.reserved | none | Reserved space in bytes per volume. Always leave this much space free for non dfs use. +ozone.metadata.dirs | none | Directory to store persisted data (RocksDB). +ozone.recon.address | 0.0.0.0:9891 | RPC address of the Recon. Use to connect Recon. diff --git a/hadoop-hdds/docs/content/concept/Datanodes.zh.md b/hadoop-hdds/docs/content/concept/Datanodes.zh.md index 8f129df7b9b..32071c9e51e 100644 --- a/hadoop-hdds/docs/content/concept/Datanodes.zh.md +++ b/hadoop-hdds/docs/content/concept/Datanodes.zh.md @@ -49,3 +49,15 @@ Ozone 的存储容器是一个自包含的超级块,容器中包含一系列 SCM 如何获得容器的位置?这一点和现有的 HDFS 十分相似。数据节点会定期发送类似于块报告的容器报告,容器报告比块报告的内容简洁的多,比如,对于一个存储容量为 196 TB 的集群,Ozone 大概会拥有四万个容器,相比于 HDFS 的一百五十万个块,块报告数量缩减为四十分之一。 这种间接管理的方式大大地提高了 Ozone 的扩展性,因为 SCM 需要处理的块数据大大减少,且命名服务(OM)作为一个独特的服务主体对于扩展 Ozone 具有重要意义。 + + +## 需要关注的配置项 + +配置项 |默认值 |
    描述
    +----|---------|------------ +dfs.container.ratis.datanode.storage.dir | none | 该目录用于存储 Ratis 元数据,如日志。 +ozone.scm.datanode.id.dir | none | 数据节点上用于存储数据节点 ID 的路径。 +hdds.datanode.dir | none | 此配置决定数据节点上的数据将存储在本地文件系统的哪个位置。 +hdds.datanode.dir.du.reserved | none | 每个卷保留的存储空间(以字节为单位)。始终为非DFS用途保留这么多空闲空间。 +ozone.metadata.dirs | none | 用于存储持久化数据(RocksDB)的目录。 +ozone.recon.address | 0.0.0.0:9891 | Recon的RPC地址。 使用 连接到Recon。 \ No newline at end of file diff --git a/hadoop-hdds/docs/content/feature/Quota.md b/hadoop-hdds/docs/content/feature/Quota.md index 90e413357b5..53c196307fa 100644 --- a/hadoop-hdds/docs/content/feature/Quota.md +++ b/hadoop-hdds/docs/content/feature/Quota.md @@ -1,6 +1,6 @@ --- title: "Quota in Ozone" -date: "2020-October-22" +date: "2020-10-22" weight: 4 summary: Quota in Ozone icon: user diff --git a/hadoop-hdds/docs/content/feature/Quota.zh.md b/hadoop-hdds/docs/content/feature/Quota.zh.md index 16e5db26cde..d690947ef06 100644 --- a/hadoop-hdds/docs/content/feature/Quota.zh.md +++ b/hadoop-hdds/docs/content/feature/Quota.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 中的配额" -date: "2020-October-22" +date: "2020-10-22" weight: 4 summary: Ozone中的配额 icon: user diff --git a/hadoop-hdds/docs/content/feature/SCM-HA.md b/hadoop-hdds/docs/content/feature/SCM-HA.md index cc42500e0c3..333c908275d 100644 --- a/hadoop-hdds/docs/content/feature/SCM-HA.md +++ b/hadoop-hdds/docs/content/feature/SCM-HA.md @@ -96,7 +96,7 @@ Second and third nodes should be *bootstrapped* instead of init. These clusters ozone scm --bootstrap ``` -Note: both commands perform one-time initialization. SCM still needs to be started by running `ozone scm --daemon start`. +Note: both commands perform one-time initialization. SCM still needs to be started by running `ozone --daemon start scm`. ## Auto-bootstrap @@ -121,7 +121,7 @@ Note: SCM still needs to be started after the init/bootstrap process. ``` ozone scm --init ozone scm --bootstrap -ozone scm --daemon start +ozone --daemon start scm ``` For Docker/Kubernetes, use `ozone scm` to start it in the foreground. diff --git a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.md b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.md index 92ae64f8a1d..002aba4cc2d 100644 --- a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.md +++ b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.md @@ -25,15 +25,15 @@ summary: Introduction to Ozone Datanode Container Schema V3 In Ozone, user data are separated into blocks and stored in HDDS Containers. Containers are the fundamental replication unit of Ozone/HDDS. Each Container has its metadata and data. Data are saved as files on disk. Metadata is saved in RocksDB. -Currently there will be one RocksDB for each Container on datanode. With user data continously grow, there will be hundreds of thousands of RocksDB instances on one datanode. It's a big challenge to manage this amount of RocksDB instances in one JVM. +Earlier, there was one RocksDB for each Container on datanode. With user data continously growing, there will be hundreds of thousands of RocksDB instances on one datanode. It's a big challenge to manage this amount of RocksDB instances in one JVM. -Unlike the current approach, this "Merge Container RocksDB in DN" feature will use only one RocksDB for each data volume, holding all metadata of Containers in this RocksDB. +Unlike the previous approach, this "Merge Container RocksDB in DN" feature will use only one RocksDB for each data volume, holding all metadata of Containers in this RocksDB. 
## Configuration -This is mainly a DN feature, which doesn't require much configuration. +This is mainly a DN feature, which doesn't require much configuration. By default, it is enabled. -Here is a configuration which disable this feature if the current one RocksDB for each container mode is more preferred. Please be noted that once the feature is enabled, it's strongly suggested not to disable it in later. +Here is a configuration which disables this feature if the "one RocksDB for each container" mode is more preferred. Please be noted that once the feature is enabled, it's strongly suggested not to disable it in later. ```XML diff --git a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md index cd3eb5fbdc5..65085a99451 100644 --- a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md +++ b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md @@ -25,13 +25,13 @@ summary: Ozone DataNode Container模式简介V3 在 Ozone 中,用户数据被分割成blocks并存储在 HDDS Container中。Container是 Ozone/HDDS 的基本复制单元。每个Container都有自己的元数据和数据, 数据以文件形式保存在磁盘上,元数据保存在RocksDB中。 -目前,数据节点上的每个Container都有一个RocksDB。随着用户数据的不断增长,一个DataNode上将会有成百上千个RocksDB实例。在一个JVM中管理如此多的RocksDB实例是一个巨大的挑战。 +之前,数据节点上每个Container都有一个RocksDB。随着用户数据的不断增长,一个DataNode上将会有成百上千个RocksDB实例。在一个JVM中管理如此多的RocksDB实例是一个巨大的挑战。 -与当前使用方法不同,"Merge Container RocksDB in DN"功能将为每个Volume只使用一个RocksDB,并在此RocksDB中保存所有Container的元数据。 +与以前的用法不同,"Merge Container RocksDB in DN"功能将为每个Volume只使用一个RocksDB,并在此RocksDB中保存所有Container的元数据。 ## 配置 -这主要是DataNode的功能,不需要太多配置。 +这主要是DataNode的功能,不需要太多配置。默认情况下,它是启用的。 如果更倾向于为每个Container使用一个RocksDB的模式,那么这下面的配置可以禁用上面所介绍的功能。请注意,一旦启用该功能,强烈建议以后不要再禁用。 diff --git a/hadoop-hdds/docs/content/interface/CSI.md b/hadoop-hdds/docs/content/interface/CSI.md index 59b24c94d19..84bd89c049e 100644 --- a/hadoop-hdds/docs/content/interface/CSI.md +++ b/hadoop-hdds/docs/content/interface/CSI.md @@ -57,7 +57,7 @@ Now, create the CSI related resources by execute the follow command. kubectl create -f /ozone/kubernetes/examples/ozone/csi ``` -## Crete pv-test and visit the result. +## Create pv-test and visit the result. Create pv-test related resources by execute the follow command. diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md index 1d0c5dcb4cc..c2f0f3af9b9 100644 --- a/hadoop-hdds/docs/content/interface/S3.md +++ b/hadoop-hdds/docs/content/interface/S3.md @@ -163,10 +163,3 @@ Or aws s3 ls --endpoint http://localhost:9878 s3://buckettest ``` -### S3 Fuse driver (goofys) - -[Goofys](https://github.com/kahing/goofys) is a S3 FUSE driver. As Ozone S3 gateway is AWS S3 compatible, it can be used to mount any Ozone buckets as an OS level mounted filesystem. 
- -```bash -goofys --endpoint http://localhost:9878 bucket1 /mount/bucket1 -``` diff --git a/hadoop-hdds/docs/content/interface/S3.zh.md b/hadoop-hdds/docs/content/interface/S3.zh.md index e3b133a0e16..370098e091e 100644 --- a/hadoop-hdds/docs/content/interface/S3.zh.md +++ b/hadoop-hdds/docs/content/interface/S3.zh.md @@ -142,10 +142,3 @@ aws s3api --endpoint http://localhost:9878 create-bucket --bucket buckettest aws s3 ls --endpoint http://localhost:9878 s3://buckettest ``` -### S3 Fuse 驱动(goofys) - -Goofys 是一个 S3 FUSE 驱动,可以将 Ozone 的桶挂载到 POSIX 文件系统。 - -```bash -goofys --endpoint http://localhost:9878 bucket1 /mount/bucket1 -``` diff --git a/hadoop-hdds/docs/content/security/GDPR.md b/hadoop-hdds/docs/content/security/GDPR.md index 25b2f2c4416..409a3ae7be0 100644 --- a/hadoop-hdds/docs/content/security/GDPR.md +++ b/hadoop-hdds/docs/content/security/GDPR.md @@ -1,6 +1,6 @@ --- title: "GDPR in Ozone" -date: "2019-September-17" +date: "2019-09-17" weight: 3 icon: user menu: diff --git a/hadoop-hdds/docs/content/security/GDPR.zh.md b/hadoop-hdds/docs/content/security/GDPR.zh.md index a7db4030871..8fd3514138f 100644 --- a/hadoop-hdds/docs/content/security/GDPR.zh.md +++ b/hadoop-hdds/docs/content/security/GDPR.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 中的 GDPR" -date: "2019-September-17" +date: "2019-09-17" weight: 3 summary: Ozone 中的 GDPR menu: diff --git a/hadoop-hdds/docs/content/security/SecureOzone.md b/hadoop-hdds/docs/content/security/SecureOzone.md index 76fd7470109..bbeef79b613 100644 --- a/hadoop-hdds/docs/content/security/SecureOzone.md +++ b/hadoop-hdds/docs/content/security/SecureOzone.md @@ -1,6 +1,6 @@ --- title: "Securing Ozone" -date: "2019-April-03" +date: "2019-04-03" summary: Overview of Ozone security concepts and steps to secure Ozone Manager and SCM. 
weight: 1 menu: diff --git a/hadoop-hdds/docs/content/security/SecureOzone.zh.md b/hadoop-hdds/docs/content/security/SecureOzone.zh.md index a7660233f4d..e74b5d8dfab 100644 --- a/hadoop-hdds/docs/content/security/SecureOzone.zh.md +++ b/hadoop-hdds/docs/content/security/SecureOzone.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 Ozone" -date: "2019-April-03" +date: "2019-04-03" summary: 简要介绍 Ozone 中的安全概念以及安全化 OM 和 SCM 的步骤。 weight: 1 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.md index 717e746cfb9..2254155e1f4 100644 --- a/hadoop-hdds/docs/content/security/SecuringDatanodes.md +++ b/hadoop-hdds/docs/content/security/SecuringDatanodes.md @@ -1,6 +1,6 @@ --- title: "Securing Datanodes" -date: "2019-April-03" +date: "2019-04-03" weight: 3 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md index 608be16e8a3..8b37fd2f6ee 100644 --- a/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 Datanode" -date: "2019-April-03" +date: "2019-04-03" weight: 3 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md index 47c04eb94d9..a8601d7a5e1 100644 --- a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md +++ b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md @@ -1,6 +1,6 @@ --- title: "Securing HTTP" -date: "2020-June-17" +date: "2020-06-17" summary: Secure HTTP web-consoles for Ozone services weight: 4 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md index 07b3f6164f6..5907a7caf9a 100644 --- a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 HTTP" -date: "2020-June-17" +date: "2020-06-17" summary: 安全化 Ozone 服务的 HTTP 网络控制台 weight: 4 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringS3.md b/hadoop-hdds/docs/content/security/SecuringS3.md index e6218b95e91..04ef6921af6 100644 --- a/hadoop-hdds/docs/content/security/SecuringS3.md +++ b/hadoop-hdds/docs/content/security/SecuringS3.md @@ -1,6 +1,6 @@ --- title: "Securing S3" -date: "2019-April-03" +date: "2019-04-03" summary: Ozone supports S3 protocol, and uses AWS Signature Version 4 protocol which allows a seamless S3 experience. weight: 5 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringS3.zh.md b/hadoop-hdds/docs/content/security/SecuringS3.zh.md index 218786fd366..395b9303354 100644 --- a/hadoop-hdds/docs/content/security/SecuringS3.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringS3.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 S3" -date: "2019-April-03" +date: "2019-04-03" summary: Ozone 支持 S3 协议,并使用 AWS Signature Version 4 protocol which allows a seamless S3 experience. weight: 5 diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md index 3b75bee1bfd..0d04a28aec7 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.md @@ -1,6 +1,6 @@ --- title: "Transparent Data Encryption" -date: "2019-April-03" +date: "2019-04-03" summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. 
weight: 2 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md index ed42519e0b2..d7fa4941e44 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md @@ -1,6 +1,6 @@ --- title: "透明数据加密" -date: "2019-April-03" +date: "2019-04-03" summary: 透明数据加密(Transparent Data Encryption,TDE)以密文形式在磁盘上保存数据,但可以在用户访问的时候自动进行解密。 weight: 2 menu: diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md index 9976cbbc4fb..ee48999ed25 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.md @@ -1,6 +1,6 @@ --- title: "Ozone ACLs" -date: "2019-April-03" +date: "2019-04-03" weight: 6 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md index 3d95fcf0877..99751cd62da 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 访问控制列表" -date: "2019-April-03" +date: "2019-04-03" weight: 6 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityWithRanger.md b/hadoop-hdds/docs/content/security/SecurityWithRanger.md index bbbd8c19f32..7dc1895ad3d 100644 --- a/hadoop-hdds/docs/content/security/SecurityWithRanger.md +++ b/hadoop-hdds/docs/content/security/SecurityWithRanger.md @@ -1,6 +1,6 @@ --- title: "Apache Ranger" -date: "2019-April-03" +date: "2019-04-03" weight: 7 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md b/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md index b7c7b8721bb..8917c0b84bc 100644 --- a/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md +++ b/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md @@ -1,6 +1,6 @@ --- title: "Apache Ranger" -date: "2019-April-03" +date: "2019-04-03" weight: 7 menu: main: diff --git a/hadoop-hdds/docs/content/tools/Debug.md b/hadoop-hdds/docs/content/tools/Debug.md new file mode 100644 index 00000000000..79c11f777ef --- /dev/null +++ b/hadoop-hdds/docs/content/tools/Debug.md @@ -0,0 +1,473 @@ +--- +title: "Ozone Debug" +date: 2024-10-14 +summary: Ozone Debug command can be used for all the debugging related tasks. +--- + + +Ozone Debug command (`ozone debug`) is a collection of developer tools intended to help in debugging and get more information of various components of ozone. + +```bash +Usage: ozone debug [-hV] [--verbose] [-conf=] + [-D=]... [COMMAND] +Developer tools for Ozone Debug operations + -conf= + path to the ozone configuration file + -D, --set= + Map of (configuration_key,configuration_value) for any + configuration overrides + -h, --help Show this help message and exit. + -V, --version Print version information and exit. + --verbose More verbose output. Show the stack trace of the errors. +Commands: + chunkinfo returns chunk location information about an + existing key + print-log-dag, pld Create an image of the current compaction log DAG + in OM. + find-missing-padding, fmp List all keys with any missing padding, optionally + limited to a volume/bucket/key URI. + recover recover the lease of a specified file. Make sure + to specify file system scheme if ofs:// is not + the default. + prefix Parse prefix contents + ldb Parse rocksdb file content + read-replicas Reads every replica for all the blocks associated + with a given key. 
+ container Container replica specific operations to be + executed on datanodes only + ratislogparser Shell of printing Ratis Log in understandable text +``` +For more detailed usage see the output of `--help` for each of the subcommands. + + +## ozone debug ldb + +Ozone heavily uses RocksDB for storing metadata. This tool helps parse the contents of RocksDB belonging to Ozone Roles. +Supported DB's : Ozone Manager (om.db) , StorageContainerManager (scm.db), Datanode/Container (container.db) +Below is the usage: + +```bash +Usage: ozone debug ldb --db= [COMMAND] +Parse rocksdb file content + --db= Database File Path +Commands: + scan Parse specified metadataTable + list_column_families, ls list all column families in db. + value-schema Schema of value in metadataTable +``` + +### list_column_families command + +`list_column_families` command lists all the column families in the db provided. + +```bash +$ ozone debug ldb --db=/path/to/scm.db ls +default +sequenceId +revokedCertsV2 +pipelines +crls +crlSequenceId +meta +containers +validCerts +validSCMCerts +scmTransactionInfos +deletedBlocks +statefulServiceConfig +revokedCerts +move +``` + +### scan command + +`scan` command parses a particular column family of a rocksdb provided and prints the records. + +```bash +Usage: ozone debug ldb scan [--compact] [--count] [--with-keys] + [--batch-size=] --cf= + [--cid=] [-d=] + [-e=] [--fields=] + [--filter=] [-l=] [-o=] + [-s=] [--thread-count=] +Parse specified metadataTable + --batch-size= + Batch size for processing DB data. + --cf, --column_family, --column-family= + Table name + --cid, --container-id= + Container ID. Applicable if datanode DB Schema is V3 + --compact disable the pretty print the output + --count, --show-count + Get estimated key count for the given DB column family + Default: false + -d, --dnSchema, --dn-schema= + Datanode DB Schema Version: V1/V2/V3 + -e, --ek, --endkey= + Key at which iteration of the DB ends + --fields= + Comma-separated list of fields needed for each value. + eg.) "name,acls.type" for showing name and type + under acls. + --filter= Comma-separated list of "::" + where is any valid field of the record, + is [EQUALS,LESSER, GREATER or REGEX]. + (EQUALS compares the exact string, REGEX compares + with a valid regular expression passed, and + LESSER/GREATER works with numeric values), + is the value of the field. + eg.) "dataSize:equals:1000" for showing records + having the value 1000 for dataSize, + "keyName:regex:^key.*$" for showing records + having keyName that matches the given regex. + -l, --limit, --length= + Maximum number of items to list. + -o, --out= File to dump table scan data + -s, --sk, --startkey= + Key from which to iterate the DB + --thread-count= + Thread count for concurrent processing. + --with-keys Print a JSON object of key->value pairs (default) + instead of a JSON array of only values. +``` +By default, the contents are printed on the console, but it can be redirected to a file using the `--out` option.
    +`--length` can be used to limit the number of records being printed.
    +`--count` doesn't print the records; it shows an estimated record count for the column family, which may not be exact.
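    +For example (a minimal sketch; the database path, column family, and numbers here are only illustrative), the options above can be combined to dump a bounded number of records to a file:
    +```bash
    +# write at most 100 records of the keyTable column family to a file instead of the console
    +$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable --length=100 --out=/tmp/keyTable.json
    +```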
    +The `ozone debug ldb scan` command provides several filtering options to make debugging easier; these are elaborated below:
    + + + +#### --startkey and --endkey +As the names suggest, these options specify the keys at which iteration of the DB starts and ends.
    +`--startkey` specifies which key to start iterating from, it is inclusive. `--endkey` specifies which key to stop iterating at, it is exclusive. + +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --startkey=vol3 --endkey=vol5 +``` +```json +{ "/vol3": { + "metadata" : { }, + "objectID" : -9999, + "updateID" : 4000, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol3", + "creationTime" : 1707192335309, + "modificationTime" : 1714057412205, + "quotaInBytes" : 22854448694951936, + "quotaInNamespace" : 100000000, + "usedNamespace" : 1, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} +, "/vol4": { + "metadata" : { }, + "objectID" : -888, + "updateID" : 5000, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol4", + "creationTime" : 1696280979907, + "modificationTime" : 1696280979907, + "quotaInBytes" : 2251799813685250, + "quotaInNamespace" : 100000000, + "usedNamespace" : 2, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} + } +``` + +#### --fields +There are multiple fields in each record. `--fields` option allows us to choose the specific fields to display. + +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable -l=1 --fields="volumeName,bucketName,keyName,keyLocationVersions.version,acls.name" +``` +```json +{ "/vol1/ozone-legacy-bucket/10T-1-terasort-input/": { + "keyLocationVersions" : [ { + "version" : 0 + } ], + "keyName" : "10T-1-terasort-input/", + "bucketName" : "ozone-legacy-bucket", + "acls" : [ { + "name" : "om" + }, { + "name" : "scm" + }, { + "name" : "testuser" + } ], + "volumeName" : "vol1" +} +} +``` + +#### --filter +`--filter` can be used to select records whose value matches a given condition. The filter is given in this format: `::`, +where `` is any valid field from the value of the record, `` is one of the 4 supported operations `[equals, regex, lesser, greater]`, `` is the value used for the comparison.
    +'equals' and 'regex' work with string, boolean, and numerical fields, while 'lesser' and 'greater' work only with numerical values.
    +Multiple filters can also be given in one command; they need to be separated by commas.
    +Using `equals` operator: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:equals:2" +``` +```json +{ + "/vol4": { + "metadata": {}, + "objectID": -888, + "updateID": 5000, + "adminName": "om", + "ownerName": "om", + "volume": "vol4", + "creationTime": 1696280979907, + "modificationTime": 1696280979907, + "quotaInBytes": 2251799813685250, + "quotaInNamespace": 100000000, + "usedNamespace": 2, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } +, "/vol5": { + "metadata" : { }, + "objectID" : -956599, + "updateID" : 45600, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol5", + "creationTime" : 1807192332309, + "modificationTime" : 1914057410005, + "quotaInBytes" : 7785494951936, + "quotaInNamespace" : 100000000, + "usedNamespace" : 2, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} + } +``` +Using `lesser` operator (`greater` operator can also be used in the same way): +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:lesser:2" +``` +```json +{ + "/vol2": { + "metadata": {}, + "objectID": -73548, + "updateID": 2384, + "adminName": "om", + "ownerName": "om", + "volume": "vol2", + "creationTime": 11980979907, + "modificationTime": 1296280979900, + "quotaInBytes": 417913685250, + "quotaInNamespace": 100000000, + "usedNamespace": 1, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` +Using `regex` operator: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="volume:regex:^v.*2$" +``` +```json +{ + "/vol2": { + "metadata": {}, + "objectID": -73548, + "updateID": 2384, + "adminName": "om", + "ownerName": "om", + "volume": "vol2", + "creationTime": 11980979907, + "modificationTime": 1296280979900, + "quotaInBytes": 417913685250, + "quotaInNamespace": 100000000, + "usedNamespace": 1, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` + +Using multiple filters: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:equals:2,volume:regex:^.*4$" +``` +```json +{ + "/vol4": { + "metadata": {}, + "objectID": -888, + "updateID": 5000, + "adminName": "om", + "ownerName": "om", + "volume": "vol4", + "creationTime": 1696280979907, + "modificationTime": 1696280979907, + "quotaInBytes": 2251799813685250, + "quotaInNamespace": 100000000, + "usedNamespace": 2, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` + +### value-schema command + +`value-schema` command shows the schema of the value stored in a column-family of a rocksdb, i.e., it shows the fields stored in the value and it's datatype. +`--depth` can be used optionally to limit the level until which the fields are fetched. 
+ +```bash +$ ozone debug ldb --db=/data/metadata/om.db value-schema --cf=keyTable --depth=1 +``` +```json +{ + "OmKeyInfo" : { + "bucketName" : "String", + "metadata" : "struct", + "fileName" : "String", + "creationTime" : "long", + "isFile" : "boolean", + "acls" : "struct", + "keyName" : "String", + "replicationConfig" : "struct", + "encInfo" : "struct", + "dataSize" : "long", + "tags" : "struct", + "keyLocationVersions" : "struct", + "updateID" : "long", + "ownerName" : "String", + "modificationTime" : "long", + "parentObjectID" : "long", + "volumeName" : "String", + "fileChecksum" : "struct", + "objectID" : "long" +} + } +``` +```bash +$ ozone debug ldb --db=/data/metadata/om.db value-schema --cf=keyTable +``` +```json +{ + "OmKeyInfo" : { + "bucketName" : "String", + "metadata" : { }, + "fileName" : "String", + "creationTime" : "long", + "isFile" : "boolean", + "acls" : { + "toStringMethod" : { }, + "hashCodeMethod" : { }, + "name" : "String", + "type" : { + "name" : "String", + "value" : "String", + "ordinal" : "int" + }, + "aclScope" : { + "name" : "String", + "ordinal" : "int" + }, + "aclBits" : "int" + }, + "keyName" : "String", + "replicationConfig" : { }, + "encInfo" : { + "ezKeyVersionName" : "String", + "keyName" : "String", + "edek" : { }, + "cipherSuite" : { + "unknownValue" : { + "value" : "int" + }, + "name" : "String", + "algoBlockSize" : "int", + "ordinal" : "int" + }, + "version" : { + "unknownValue" : { + "value" : "int" + }, + "name" : "String", + "description" : "String", + "version" : "int", + "ordinal" : "int" + }, + "iv" : { } + }, + "dataSize" : "long", + "tags" : { }, + "keyLocationVersions" : { + "isMultipartKey" : "boolean", + "locationVersionMap" : { }, + "version" : "long" + }, + "updateID" : "long", + "ownerName" : "String", + "modificationTime" : "long", + "parentObjectID" : "long", + "volumeName" : "String", + "fileChecksum" : { }, + "objectID" : "long" + } +} +``` \ No newline at end of file diff --git a/hadoop-hdds/docs/content/tools/Debug.zh.md b/hadoop-hdds/docs/content/tools/Debug.zh.md new file mode 100644 index 00000000000..3f3238dd84b --- /dev/null +++ b/hadoop-hdds/docs/content/tools/Debug.zh.md @@ -0,0 +1,466 @@ +--- +title: "Ozone Debug" +date: 2024-10-14 +summary: Ozone Debug 命令可用于所有与调试相关的任务。 +--- + + +Ozone Debug 命令 (`ozone debug`) 是开发人员工具的集合,旨在帮助调试并获取 Ozone 各个组件的更多信息。 + +```bash +Usage: ozone debug [-hV] [--verbose] [-conf=] + [-D=]... [COMMAND] +Developer tools for Ozone Debug operations + -conf= + path to the ozone configuration file + -D, --set= + a map of (configuration_key,configuration_value) for any overrides + -h, --help Show this help message and exit. + -V, --version Print version information and exit. + --verbose More verbose output. Show the stack trace of the errors. 
+``` +子命令: + chunkinfo 返回指定文件/对象的块位置信息。 + print-log-dag, pld 在 OM 中创建当前压缩日志 DAG 的镜像。 + find-missing-padding, fmp 列出所有缺少填充的文件/对象,可以选择指定卷/存储桶/键 URI。 + recover 恢复指定文件的租约。如果默认值不是 ofs:// ,请确保指定文件系统schema。 + prefix 解析前缀内容。 + ldb 解析 rocksdb 文件内容。 + read-replicas 读取给定路径文件/对象所有块的每个副本。 + container 容器副本特定操作,仅在数据节点上执行。 + ratislogparser 解析Ratis Log 成用户可理解的文字形式。 + +有关更详细的用法,请参阅每个子命令的“--help”输出。 + + +## ozone debug ldb + +Ozone 大量使用 RocksDB 来存储元数据。该工具帮助解析各个Ozone Roles 的 RocksDB 数据内容。 +支持的数据库:Ozone Manager (om.db)、StorageContainerManager (scm.db)、Datanode/Container (container.db) +下面是用法: + +```bash +Usage: ozone debug ldb --db= [COMMAND] +Parse rocksdb file content + --db= Database File Path +Commands: + scan Parse specified metadataTable + list_column_families, ls list all column families in db. + value-schema Schema of value in metadataTable +``` + +### list_column_families command + +`list_column_families` 命令列出指定数据库中的所有列族。 + +```bash +$ ozone debug ldb --db=/path/to/scm.db ls +default +sequenceId +revokedCertsV2 +pipelines +crls +crlSequenceId +meta +containers +validCerts +validSCMCerts +scmTransactionInfos +deletedBlocks +statefulServiceConfig +revokedCerts +move +``` + +### scan command + +`scan` 命令解析提供的 rocksdb 的特定列族并打印记录。 + +```bash +Usage: ozone debug ldb scan [--compact] [--count] [--with-keys] + [--batch-size=] --cf= + [--cid=] [-d=] + [-e=] [--fields=] + [--filter=] [-l=] [-o=] + [-s=] [--thread-count=] +Parse specified metadataTable + --batch-size= + Batch size for processing DB data. + --cf, --column_family, --column-family= + Table name + --cid, --container-id= + Container ID. Applicable if datanode DB Schema is V3 + --compact disable the pretty print the output + --count, --show-count + Get estimated key count for the given DB column family + Default: false + -d, --dnSchema, --dn-schema= + Datanode DB Schema Version: V1/V2/V3 + -e, --ek, --endkey= + Key at which iteration of the DB ends + --fields= + Comma-separated list of fields needed for each value. + eg.) "name,acls.type" for showing name and type + under acls. + --filter= Comma-separated list of "::" + where is any valid field of the record, + is [EQUALS,LESSER, GREATER or REGEX]. + (EQUALS compares the exact string, REGEX compares + with a valid regular expression passed, and + LESSER/GREATER works with numeric values), + is the value of the field. + eg.) "dataSize:equals:1000" for showing records + having the value 1000 for dataSize, + "keyName:regex:^key.*$" for showing records + having keyName that matches the given regex. + -l, --limit, --length= + Maximum number of items to list. + -o, --out= File to dump table scan data + -s, --sk, --startkey= + Key from which to iterate the DB + --thread-count= + Thread count for concurrent processing. + --with-keys Print a JSON object of key->value pairs (default) + instead of a JSON array of only values. +``` +默认情况下,内容打印在控制台上,但可以使用 `--out` 选项将其重定向到文件。
    +`--length` 可用于限制打印的记录数。
    +`--count` 不打印记录,只显示大概的记录数,并不完全精确。
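    +例如(这是一个简单的示例,其中的数据库路径、列族和数值仅作演示),可以组合上述选项,将有限数量的记录导出到文件:
    +```bash
    +# 将 keyTable 列族中最多 100 条记录写入文件,而不是打印到控制台
    +$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable --length=100 --out=/tmp/keyTable.json
    +```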
    +`ozone debug ldb scan` 命令提供了许多过滤选项以使调试更容易,详细说明如下:
    + + + +#### --startkey and --endkey +顾名思义,这些选项指定数据库迭代的起始键和结束键。
    +`--startkey` 指定从哪个键开始迭代,包含该键。 `--endkey` 指定停止迭代的键,不包含该键。 + +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --startkey=vol3 --endkey=vol5 +``` +```json +{ "/vol3": { + "metadata" : { }, + "objectID" : -9999, + "updateID" : 4000, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol3", + "creationTime" : 1707192335309, + "modificationTime" : 1714057412205, + "quotaInBytes" : 22854448694951936, + "quotaInNamespace" : 100000000, + "usedNamespace" : 1, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} +, "/vol4": { + "metadata" : { }, + "objectID" : -888, + "updateID" : 5000, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol4", + "creationTime" : 1696280979907, + "modificationTime" : 1696280979907, + "quotaInBytes" : 2251799813685250, + "quotaInNamespace" : 100000000, + "usedNamespace" : 2, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} + } +``` + +#### --fields +每条记录中有多个字段。 `--fields` 选项允许我们选择要显示的特定字段。 + +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable -l=1 --fields="volumeName,bucketName,keyName,keyLocationVersions.version,acls.name" +``` +```json +{ "/vol1/ozone-legacy-bucket/10T-1-terasort-input/": { + "keyLocationVersions" : [ { + "version" : 0 + } ], + "keyName" : "10T-1-terasort-input/", + "bucketName" : "ozone-legacy-bucket", + "acls" : [ { + "name" : "om" + }, { + "name" : "scm" + }, { + "name" : "testuser" + } ], + "volumeName" : "vol1" +} +} +``` + +#### --filter +`--filter` 可用于选择值与给定条件匹配的记录。过滤器按以下格式给出:`::`, +其中“”是记录值中的任何有效字段,“”是 4 个支持的操作 `[equals, regex, lesser, greater]` 之一,“”是使用的值用于比较。
    +`Equals` 和 `regex` 适用于字符串、布尔值和数字字段,`lesser` 和 `greater` 仅适用于数字值。
    +也可以在一个命令中给出多个过滤器,它们需要用逗号分隔。
    +使用 `equals` (等于) 运算符: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:equals:2" +``` +```json +{ + "/vol4": { + "metadata": {}, + "objectID": -888, + "updateID": 5000, + "adminName": "om", + "ownerName": "om", + "volume": "vol4", + "creationTime": 1696280979907, + "modificationTime": 1696280979907, + "quotaInBytes": 2251799813685250, + "quotaInNamespace": 100000000, + "usedNamespace": 2, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } +, "/vol5": { + "metadata" : { }, + "objectID" : -956599, + "updateID" : 45600, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol5", + "creationTime" : 1807192332309, + "modificationTime" : 1914057410005, + "quotaInBytes" : 7785494951936, + "quotaInNamespace" : 100000000, + "usedNamespace" : 2, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} + } +``` +使用 `lesser` (较小) 运算符(`greater`(较大) 运算符也可以以相同的方式使用): +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:lesser:2" +``` +```json +{ + "/vol2": { + "metadata": {}, + "objectID": -73548, + "updateID": 2384, + "adminName": "om", + "ownerName": "om", + "volume": "vol2", + "creationTime": 11980979907, + "modificationTime": 1296280979900, + "quotaInBytes": 417913685250, + "quotaInNamespace": 100000000, + "usedNamespace": 1, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` +使用 `regex` 运算符: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="volume:regex:^v.*2$" +``` +```json +{ + "/vol2": { + "metadata": {}, + "objectID": -73548, + "updateID": 2384, + "adminName": "om", + "ownerName": "om", + "volume": "vol2", + "creationTime": 11980979907, + "modificationTime": 1296280979900, + "quotaInBytes": 417913685250, + "quotaInNamespace": 100000000, + "usedNamespace": 1, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` + +使用多个过滤器: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:equals:2,volume:regex:^.*4$" +``` +```json +{ + "/vol4": { + "metadata": {}, + "objectID": -888, + "updateID": 5000, + "adminName": "om", + "ownerName": "om", + "volume": "vol4", + "creationTime": 1696280979907, + "modificationTime": 1696280979907, + "quotaInBytes": 2251799813685250, + "quotaInNamespace": 100000000, + "usedNamespace": 2, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` + +### value-schema command + +“value-schema”命令显示存储在rocksdb的列族中的值的模式,即,它显示存储在值中的字段及其数据类型。 +可以选择使用`--depth`来限制获取字段的级别。 + +```bash +$ ozone debug ldb --db=/data/metadata/om.db value-schema --cf=keyTable --depth=1 +``` +```json +{ + "OmKeyInfo" : { + "bucketName" : "String", + "metadata" : "struct", + "fileName" : "String", + "creationTime" : "long", + "isFile" : "boolean", + "acls" : "struct", + "keyName" : "String", + "replicationConfig" : "struct", + "encInfo" : "struct", + "dataSize" : "long", + "tags" : "struct", + "keyLocationVersions" : "struct", + "updateID" : "long", + "ownerName" : "String", + "modificationTime" : "long", + "parentObjectID" : "long", + "volumeName" : "String", + "fileChecksum" : "struct", + "objectID" : "long" +} + } +``` +```bash +$ ozone debug ldb --db=/data/metadata/om.db value-schema --cf=keyTable +``` +```json +{ + "OmKeyInfo" : { + "bucketName" : "String", + "metadata" : { }, + 
"fileName" : "String", + "creationTime" : "long", + "isFile" : "boolean", + "acls" : { + "toStringMethod" : { }, + "hashCodeMethod" : { }, + "name" : "String", + "type" : { + "name" : "String", + "value" : "String", + "ordinal" : "int" + }, + "aclScope" : { + "name" : "String", + "ordinal" : "int" + }, + "aclBits" : "int" + }, + "keyName" : "String", + "replicationConfig" : { }, + "encInfo" : { + "ezKeyVersionName" : "String", + "keyName" : "String", + "edek" : { }, + "cipherSuite" : { + "unknownValue" : { + "value" : "int" + }, + "name" : "String", + "algoBlockSize" : "int", + "ordinal" : "int" + }, + "version" : { + "unknownValue" : { + "value" : "int" + }, + "name" : "String", + "description" : "String", + "version" : "int", + "ordinal" : "int" + }, + "iv" : { } + }, + "dataSize" : "long", + "tags" : { }, + "keyLocationVersions" : { + "isMultipartKey" : "boolean", + "locationVersionMap" : { }, + "version" : "long" + }, + "updateID" : "long", + "ownerName" : "String", + "modificationTime" : "long", + "parentObjectID" : "long", + "volumeName" : "String", + "fileChecksum" : { }, + "objectID" : "long" + } +} +``` \ No newline at end of file diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index d14ae28c10d..7f4ffbb8a70 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-docs - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone/HDDS Documentation Apache Ozone/HDDS Documentation jar diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index 201336d5ed3..b540d1c68ea 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-erasurecode - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Earsurecode utils Apache Ozone HDDS Erasurecode diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java index 83650c132b0..2069a51be17 100644 --- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java +++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java @@ -19,9 +19,9 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory; import org.apache.ozone.erasurecode.rawcoder.NativeRSRawErasureCoderFactory; import org.apache.ozone.erasurecode.rawcoder.NativeXORRawErasureCoderFactory; +import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +31,6 @@ import java.util.Map; import java.util.ServiceLoader; import java.util.Set; -import java.util.stream.Collectors; /** * This class registers all coder implementations. @@ -108,8 +107,8 @@ void updateCoders(Iterable coderFactories) { String codecName = entry.getKey(); List coders = entry.getValue(); coderNameMap.put(codecName, coders.stream(). - map(RawErasureCoderFactory::getCoderName). 
- collect(Collectors.toList()).toArray(new String[0])); + map(RawErasureCoderFactory::getCoderName) + .toArray(String[]::new)); } } diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 70cce849aec..37d41cde390 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-server-framework - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Server Framework Apache Ozone HDDS Server Framework diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java index 5b283c3a1a3..fde555208b3 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java @@ -31,23 +31,6 @@ @ConfigGroup(prefix = HDDS_DATANODE_RATIS_PREFIX_KEY + "." + GrpcConfigKeys.PREFIX) public class DatanodeRatisGrpcConfig { - @Config(key = "message.size.max", - defaultValue = "32MB", - type = ConfigType.SIZE, - tags = {OZONE, CLIENT, PERFORMANCE}, - description = "Maximum message size allowed to be received by Grpc " + - "Channel (Server)." - ) - private int maximumMessageSize = 32 * 1024 * 1024; - - public int getMaximumMessageSize() { - return maximumMessageSize; - } - - public void setMaximumMessageSize(int maximumMessageSize) { - this.maximumMessageSize = maximumMessageSize; - } - @Config(key = "flow.control.window", defaultValue = "5MB", type = ConfigType.SIZE, diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java index cbb4f3fc2ee..0cb39482e98 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java @@ -118,7 +118,6 @@ String getSCMCertificate(ScmNodeDetailsProto scmNodeDetails, /** * Get Root CA certificate. - * @return * @throws IOException */ String getRootCACertificate() throws IOException; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java index a938d53c7c4..71918308f14 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java @@ -330,7 +330,6 @@ public SCMGetCertResponseProto getCACert() throws IOException { * @param role - node type: OM/SCM/DN. * @param startSerialId - start cert serial id. * @param count - max number of certificates returned in a batch. 
- * @return * @throws IOException */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java index 42e8f8202cb..4690054a87d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java @@ -36,6 +36,7 @@ public class MoveDataNodePair { Proto2Codec.get(MoveDataNodePairProto.getDefaultInstance()), MoveDataNodePair::getFromProtobuf, pair -> pair.getProtobufMessage(ClientVersion.CURRENT_VERSION), + MoveDataNodePair.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 1f114304cca..d9b198d4b14 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -307,7 +307,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException { } /** * Sort the datanodes based on distance from client. - * @return List + * @return list of datanodes; * @throws IOException */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 11231d2d01b..5e293eae67b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -109,6 +109,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoRequestProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -129,6 +130,7 @@ import java.io.Closeable; import java.io.IOException; +import java.util.Arrays; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -158,6 +160,12 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB private final StorageContainerLocationProtocolPB rpcProxy; private final SCMContainerLocationFailoverProxyProvider fpp; + /** + * This is used to check if 'leader' or 'follower' exists, + * in order to confirm whether we have enabled Ratis. + */ + private final List scmRatisRolesToCheck = Arrays.asList("leader", "follower"); + /** * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. 
* @@ -383,19 +391,19 @@ public List getExistContainerWithPipelinesInBatch( * {@inheritDoc} */ @Override - public List listContainer(long startContainerID, int count) + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -437,12 +445,17 @@ public List listContainer(long startContainerID, int count, .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } - return containerList; + + if (response.hasContainerCount()) { + return new ContainerListResult(containerList, response.getContainerCount()); + } else { + return new ContainerListResult(containerList, -1); + } } @Deprecated @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { throw new UnsupportedOperationException("Should no longer be called from " + @@ -761,8 +774,23 @@ public ScmInfo getScmInfo() throws IOException { .setScmId(resp.getScmId()) .setRatisPeerRoles(resp.getPeerRolesList()); - return builder.build(); + // By default, we assume that SCM Ratis is not enabled. + // If the response contains the `ScmRatisEnabled` field, + // we will set it directly; otherwise, + // we will determine if Ratis is enabled based on + // whether the `peerRolesList` contains the keywords 'leader' or 'follower'. 
+ if (resp.hasScmRatisEnabled()) { + builder.setScmRatisEnabled(resp.getScmRatisEnabled()); + } else { + List peerRolesList = resp.getPeerRolesList(); + if (!peerRolesList.isEmpty()) { + boolean containsScmRoles = peerRolesList.stream().map(String::toLowerCase) + .anyMatch(scmRatisRolesToCheck::contains); + builder.setScmRatisEnabled(containsScmRoles); + } + } + return builder.build(); } @Override @@ -1188,7 +1216,7 @@ public void close() { public List getListOfContainers( long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainer(startContainerID, count, state); + return listContainer(startContainerID, count, state).getContainerInfoList(); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java index c79d6f5aa27..07cd635dab4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java @@ -17,12 +17,6 @@ package org.apache.hadoop.hdds.security; import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import java.io.ByteArrayInputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.IOException; import java.security.KeyPair; import java.security.PrivateKey; import java.security.PublicKey; @@ -32,8 +26,6 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.SecretKeyProto; /** * Wrapper class for Ozone/Hdds secret keys. Used in delegation tokens and block @@ -41,7 +33,7 @@ */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class OzoneSecretKey implements Writable { +public class OzoneSecretKey { private int keyId; private long expiryDate; @@ -107,28 +99,6 @@ public void setExpiryDate(long expiryDate) { this.expiryDate = expiryDate; } - @Override - public void write(DataOutput out) throws IOException { - SecretKeyProto token = SecretKeyProto.newBuilder() - .setKeyId(getKeyId()) - .setExpiryDate(getExpiryDate()) - .setPrivateKeyBytes(ByteString.copyFrom(getEncodedPrivateKey())) - .setPublicKeyBytes(ByteString.copyFrom(getEncodedPubliceKey())) - .build(); - out.write(token.toByteArray()); - } - - @Override - public void readFields(DataInput in) throws IOException { - SecretKeyProto secretKey = SecretKeyProto.parseFrom((DataInputStream) in); - expiryDate = secretKey.getExpiryDate(); - keyId = secretKey.getKeyId(); - privateKey = SecurityUtil.getPrivateKey(secretKey.getPrivateKeyBytes() - .toByteArray(), securityConfig); - publicKey = SecurityUtil.getPublicKey(secretKey.getPublicKeyBytes() - .toByteArray(), securityConfig); - } - @Override public int hashCode() { HashCodeBuilder hashCodeBuilder = new HashCodeBuilder(537, 963); @@ -158,25 +128,4 @@ public boolean equals(Object obj) { return false; } - /** - * Reads protobuf encoded input stream to construct {@link OzoneSecretKey}. 
- */ - static OzoneSecretKey readProtoBuf(DataInput in) throws IOException { - Preconditions.checkNotNull(in); - SecretKeyProto key = SecretKeyProto.parseFrom((DataInputStream) in); - return new OzoneSecretKey(key.getKeyId(), key.getExpiryDate(), - key.getPrivateKeyBytes().toByteArray(), - key.getPublicKeyBytes().toByteArray()); - } - - /** - * Reads protobuf encoded input stream to construct {@link OzoneSecretKey}. - */ - static OzoneSecretKey readProtoBuf(byte[] identifier) throws IOException { - Preconditions.checkNotNull(identifier); - DataInputStream in = new DataInputStream(new ByteArrayInputStream( - identifier)); - return readProtoBuf(in); - } - } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretManager.java index 601bdf0ea72..b25941ca676 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretManager.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretManager.java @@ -172,14 +172,13 @@ public int incrementDelegationTokenSeqNum() { */ private OzoneSecretKey updateCurrentKey(KeyPair keyPair, X509Certificate certificate) { - logger.info("Updating current master key for generating tokens. Cert id {}", - certificate.getSerialNumber().toString()); - int newCurrentId = incrementCurrentKeyId(); OzoneSecretKey newKey = new OzoneSecretKey(newCurrentId, certificate.getNotAfter().getTime(), keyPair, certificate.getSerialNumber().toString()); currentKey.set(newKey); + logger.info("Updated current master key for generating tokens. Cert id {}, Master key id {}", + certificate.getSerialNumber().toString(), newKey.getKeyId()); return newKey; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java index e7e029f7087..da651160d04 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java @@ -50,8 +50,7 @@ void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) throws SCMSecurityException; - /** Same as {@link #verify(Token, - * ContainerCommandRequestProtoOrBuilder)}, but with encoded token. */ + /** Same as {@link #verify}, but with encoded token. */ default void verify(ContainerCommandRequestProtoOrBuilder cmd, String encodedToken) throws SCMSecurityException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java index b78604643e5..79f41fba865 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java @@ -39,7 +39,8 @@ public final class CertInfo implements Comparable, Serializable { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(CertInfoProto.getDefaultInstance()), CertInfo::fromProtobuf, - CertInfo::getProtobuf); + CertInfo::getProtobuf, + CertInfo.class); public static Codec getCodec() { return CODEC; @@ -133,7 +134,6 @@ public String toString() { /** * Builder class for CertInfo. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private X509Certificate x509Certificate; private long timestamp; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java index b2d62443b77..5a39d0f1dd0 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java @@ -104,7 +104,7 @@ public DefaultApprover(PKIProfile pkiProfile, SecurityConfig config) { * @param certSerialId - the new certificate id. * @return Signed Certificate. * @throws IOException - On Error - * @throws OperatorCreationException - on Error. + * @throws CertificateException - on Error. */ @SuppressWarnings("ParameterNumber") @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java index a93bdb4e3d6..118aa826013 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java @@ -195,8 +195,6 @@ public CertPath getCaCertPath() * * @param certSerialId - Certificate for this CA. * @return X509Certificate - * @throws CertificateException - usually thrown if this CA is not - * initialized. * @throws IOException - on Error. */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index 70a475982bd..42292b9663f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -73,6 +73,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; import org.apache.hadoop.hdds.security.ssl.ReloadingX509TrustManager; @@ -667,6 +668,8 @@ protected enum InitCase { * certificate. * * Truth table: + *
+   * {@code
    *  +--------------+---------------+--------------+---------------------+
    *  | Private Key  | Public Keys   | Certificate  |   Result            |
    *  +--------------+---------------+--------------+---------------------+
@@ -679,7 +682,8 @@ protected enum InitCase {
    *  | True   (1)   | True    (1)   | False  (0)   |   GETCERT->SUCCESS  |
    *  | True   (1)   | True    (1)   | True   (1)   |   SUCCESS           |
    *  +--------------+-----------------+--------------+----------------+
-   *
+   * }
+   * </pre>
    * Success in following cases: * 1. If keypair as well certificate is available. * 2. If private key and certificate is available and public key is @@ -983,43 +987,6 @@ public Set getAllCaCerts() { return certs; } - @Override - public List getCAList() { - pemEncodedCACertsLock.lock(); - try { - return pemEncodedCACerts; - } finally { - pemEncodedCACertsLock.unlock(); - } - } - - public List listCA() throws IOException { - pemEncodedCACertsLock.lock(); - try { - if (pemEncodedCACerts == null) { - updateCAList(); - } - return pemEncodedCACerts; - } finally { - pemEncodedCACertsLock.unlock(); - } - } - - @Override - public List updateCAList() throws IOException { - pemEncodedCACertsLock.lock(); - try { - pemEncodedCACerts = getScmSecureClient().listCACertificate(); - return pemEncodedCACerts; - } catch (Exception e) { - getLogger().error("Error during updating CA list", e); - throw new CertificateException("Error during updating CA list", e, - CERTIFICATE_ERROR); - } finally { - pemEncodedCACertsLock.unlock(); - } - } - @Override public ReloadingX509TrustManager getTrustManager() throws CertificateException { try { @@ -1049,8 +1016,20 @@ public ReloadingX509KeyManager getKeyManager() throws CertificateException { } } + @Override + public ClientTrustManager createClientTrustManager() throws IOException { + CACertificateProvider caCertificateProvider = () -> { + List caCerts = new ArrayList<>(); + caCerts.addAll(getAllCaCerts()); + caCerts.addAll(getAllRootCaCerts()); + return caCerts; + }; + return new ClientTrustManager(caCertificateProvider, caCertificateProvider); + } + /** * Register a receiver that will be called after the certificate renewed. + * * @param receiver */ @Override @@ -1107,7 +1086,7 @@ public Duration timeBeforeExpiryGracePeriod(X509Certificate certificate) { * Renew keys and certificate. Save the keys are certificate to disk in new * directories, swap the current key directory and certs directory with the * new directories. - * @param force, check certificate expiry time again if force is false. + * @param force check certificate expiry time again if force is false. 
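The createClientTrustManager() addition above replaces the removed getCAList()/updateCAList() plumbing: callers build a ClientTrustManager directly from the certificates the client already holds. A minimal caller sketch, not part of the patch; the surrounding class and method names are illustrative only:

// Illustrative only: wiring the new factory method into client setup code.
import java.io.IOException;

import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;

public final class TrustManagerExample {
  private TrustManagerExample() {
  }

  /** certClient is assumed to be an already-initialized certificate client. */
  public static ClientTrustManager newClientTrustManager(CertificateClient certClient)
      throws IOException {
    // Aggregates the client's CA and root CA certificates internally,
    // replacing the removed getCAList()/updateCAList() round trips.
    return certClient.createClientTrustManager();
  }
}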
* @return String, new certificate ID * */ public String renewAndStoreKeyAndCertificate(boolean force) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java index 12b6b64f49a..1f8568866d8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java @@ -17,13 +17,16 @@ */ package org.apache.hadoop.hdds.server; +import java.io.IOException; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Set; import com.google.common.collect.Sets; +import jakarta.annotation.Nullable; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -33,6 +36,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS_GROUPS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS; /** * This class contains ozone admin user information, username and group, @@ -186,4 +191,88 @@ public static Collection getOzoneReadOnlyAdminsGroupsFromConfig( return conf.getTrimmedStringCollection( OZONE_READONLY_ADMINISTRATORS_GROUPS); } + + /** + * Get the list of S3 administrators from Ozone config. + *

+   * Notes:
+   * <ul>
+   *   <li>If ozone.s3.administrators value is empty string or unset,
+   *   defaults to ozone.administrators value.</li>
+   *   <li>If current user is not part of the administrators group,
+   *   {@link UserGroupInformation#getCurrentUser()} will be added to the resulting list</li>
+   * </ul>
    + * @param conf An instance of {@link OzoneConfiguration} being used + * @return A {@link Collection} of the S3 administrator users + */ + public static Set getS3AdminsFromConfig(OzoneConfiguration conf) throws IOException { + Set ozoneAdmins = new HashSet<>(conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS)); + + if (ozoneAdmins.isEmpty()) { + ozoneAdmins = new HashSet<>(conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS)); + } + + String omSPN = UserGroupInformation.getCurrentUser().getShortUserName(); + ozoneAdmins.add(omSPN); + + return ozoneAdmins; + } + + /** + * Get the list of the groups that are a part of S3 administrators from Ozone config. + *

    + * Note: If ozone.s3.administrators.groups value is empty or unset, + * defaults to the ozone.administrators.groups value + * + * @param conf An instance of {@link OzoneConfiguration} being used + * @return A {@link Collection} of the S3 administrator groups + */ + public static Set getS3AdminsGroupsFromConfig(OzoneConfiguration conf) { + Set s3AdminsGroup = new HashSet<>(conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS_GROUPS)); + + if (s3AdminsGroup.isEmpty() && conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS).isEmpty()) { + s3AdminsGroup = new HashSet<>(conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS_GROUPS)); + } + + return s3AdminsGroup; + } + + /** + * Get the users and groups that are a part of S3 administrators. + * @param conf Stores an instance of {@link OzoneConfiguration} being used + * @return an instance of {@link OzoneAdmins} containing the S3 admin users and groups + */ + public static OzoneAdmins getS3Admins(OzoneConfiguration conf) { + Set s3Admins; + try { + s3Admins = getS3AdminsFromConfig(conf); + } catch (IOException ie) { + s3Admins = Collections.emptySet(); + } + Set s3AdminGroups = getS3AdminsGroupsFromConfig(conf); + + return new OzoneAdmins(s3Admins, s3AdminGroups); + } + + /** + * Check if the provided user is an S3 administrator. + * @param user An instance of {@link UserGroupInformation} with information about the user to verify + * @param s3Admins An instance of {@link OzoneAdmins} containing information + * of the S3 administrator users and groups in the system + * @return {@code true} if the provided user is an S3 administrator else {@code false} + */ + public static boolean isS3Admin(@Nullable UserGroupInformation user, OzoneAdmins s3Admins) { + return null != user && s3Admins.isAdmin(user); + } + + /** + * Check if the provided user is an S3 administrator. + * @param user An instance of {@link UserGroupInformation} with information about the user to verify + * @param conf An instance of {@link OzoneConfiguration} being used + * @return {@code true} if the provided user is an S3 administrator else {@code false} + */ + public static boolean isS3Admin(@Nullable UserGroupInformation user, OzoneConfiguration conf) { + OzoneAdmins s3Admins = getS3Admins(conf); + return isS3Admin(user, s3Admins); + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java index bcd75f3f215..f966ef00932 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java @@ -33,6 +33,15 @@ */ public interface ServiceRuntimeInfo { + /** + * Gets the namespace of Ozone. + * + * @return the namespace + */ + default String getNamespace() { + return ""; + }; + /** * Gets the version of Hadoop. * @@ -47,13 +56,6 @@ public interface ServiceRuntimeInfo { */ String getSoftwareVersion(); - /** - * Get the compilation information which contains date, user and branch. - * - * @return the compilation information, as a JSON string. - */ - String getCompileInfo(); - /** * Gets the NN start time in milliseconds. 
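The new S3 admin helpers in OzoneAdmins are meant to be consumed through getS3Admins and isS3Admin rather than by re-reading the config keys in each caller. A minimal sketch of that flow, assuming only the method signatures shown above; the class name and output are illustrative:

// Illustrative only: checking the caller against the configured S3 admins.
import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.server.OzoneAdmins;
import org.apache.hadoop.security.UserGroupInformation;

public final class S3AdminCheckExample {
  private S3AdminCheckExample() {
  }

  public static void main(String[] args) throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Reads ozone.s3.administrators(.groups), falling back to ozone.administrators(.groups).
    OzoneAdmins s3Admins = OzoneAdmins.getS3Admins(conf);

    UserGroupInformation caller = UserGroupInformation.getCurrentUser();
    if (OzoneAdmins.isS3Admin(caller, s3Admins)) {
      System.out.println(caller.getShortUserName() + " may run S3 admin operations");
    }
  }
}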
* diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java index 987f4aee031..74ba3c5b629 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java @@ -42,12 +42,6 @@ public String getSoftwareVersion() { return versionInfo.getVersion(); } - @Override - public String getCompileInfo() { - return versionInfo.getDate() + " by " + versionInfo.getUser() + " from " - + versionInfo.getBranch(); - } - @Override public long getStartedTimeInMillis() { return startedTimeInMillis; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index f27f42e0b4c..9d037fed6bc 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -123,8 +123,8 @@ /** * Create a Jetty embedded server to answer http requests. The primary goal is * to serve up status information for the server. There are three contexts: - * "/logs/" -> points to the log directory "/static/" -> points to common static - * files (src/webapps/static) "/" -> the jsp server code from + * "/logs/" -> points to the log directory "/static/" -> points to common static + * files (src/webapps/static) "/" -> the jsp server code from * (src/webapps/) * * This class is a fork of the old HttpServer. HttpServer exists for diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java index f4f188aaf39..bceec92c6c8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java @@ -41,11 +41,12 @@ import org.slf4j.LoggerFactory; /** + *

      * Servlet that runs async-profiler as web-endpoint.
    - * 

    + * * Source: https://github.com/apache/hive/blob/master/common/src/java/org * /apache/hive/http/ProfileServlet.java - *

    + * * Following options from async-profiler can be specified as query parameter. * // -e event profiling event: cpu|alloc|lock|cache-misses etc. * // -d duration run profiling for seconds @@ -79,7 +80,7 @@ * curl "http://localhost:10002/prof" * - To collect 1 minute CPU profile of current process and output in tree * format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" + * curl "http://localhost:10002/prof?output=tree&duration=60" * - To collect 30 second heap allocation profile of current process (returns * FlameGraph svg) * curl "http://localhost:10002/prof?event=alloc" @@ -111,6 +112,7 @@ * The default output format of the newest async profiler is HTML. * If the user is using an older version such as 1.5, HTML is not supported. * Please specify the corresponding output format. + *

    */ public class ProfileServlet extends HttpServlet { private static final long serialVersionUID = 1L; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java index 0d01aa43b42..535a5e6c8e9 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java @@ -24,8 +24,6 @@ import java.io.IOException; import java.io.PrintWriter; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; - import io.prometheus.client.CollectorRegistry; import io.prometheus.client.exporter.common.TextFormat; @@ -58,7 +56,6 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) return; } } - DefaultMetricsSystem.instance().publishMetricsNow(); PrintWriter writer = resp.getWriter(); getPrometheusSink().writeMetrics(writer); writer.write("\n\n#Dropwizard metrics\n\n"); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java index 2d718628e1e..cb1fdd3375a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java @@ -41,22 +41,20 @@ import org.apache.commons.fileupload.servlet.ServletFileUpload; import org.apache.commons.fileupload.util.Streams; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBStore; - -import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.utils.HddsServerUtil.writeDBCheckpointToStream; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST; import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; -import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Provides the current checkpoint Snapshot of the OM/SCM DB. (tar) */ @@ -287,7 +285,7 @@ private static String[] parseFormDataParameters(HttpServletRequest request) { LOG.warn("Exception occured during form data parsing {}", e.getMessage()); } - return sstParam.size() == 0 ? null : sstParam.toArray(new String[0]); + return sstParam.isEmpty() ? 
null : sstParam.toArray(new String[0]); } /** diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java index 342a0400cbd..0dc244bdbc7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java @@ -35,8 +35,6 @@ import org.apache.hadoop.hdds.scm.proxy.SCMClientConfig; import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; @@ -373,80 +371,6 @@ public static List getExistingSstFiles(File db) throws IOException { return sstList; } - /** - * Build CA list which need to be passed to client. - * - * If certificate client is null, obtain the list of CA using SCM security - * client, else it uses certificate client. - * @return list of CA - */ - public static List buildCAList(CertificateClient certClient, - ConfigurationSource configuration) throws IOException { - long waitDuration = - configuration.getTimeDuration(OZONE_SCM_CA_LIST_RETRY_INTERVAL, - OZONE_SCM_CA_LIST_RETRY_INTERVAL_DEFAULT, TimeUnit.SECONDS); - if (certClient != null) { - if (!SCMHAUtils.isSCMHAEnabled(configuration)) { - return generateCAList(certClient); - } else { - Collection scmNodes = SCMHAUtils.getSCMNodeIds(configuration); - int expectedCount = scmNodes.size() + 1; - if (scmNodes.size() > 1) { - // First check if cert client has ca list initialized. - // This is being done, when this method is called multiple times we - // don't make call to SCM, we return from in-memory. 
- List caCertPemList = certClient.getCAList(); - if (caCertPemList != null && caCertPemList.size() == expectedCount) { - return caCertPemList; - } - return getCAListWithRetry(() -> - waitForCACerts(certClient::updateCAList, expectedCount), - waitDuration); - } else { - return generateCAList(certClient); - } - } - } else { - SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient = - HddsServerUtil.getScmSecurityClient(configuration); - if (!SCMHAUtils.isSCMHAEnabled(configuration)) { - List caCertPemList = new ArrayList<>(); - SCMGetCertResponseProto scmGetCertResponseProto = - scmSecurityProtocolClient.getCACert(); - if (scmGetCertResponseProto.hasX509Certificate()) { - caCertPemList.add(scmGetCertResponseProto.getX509Certificate()); - } - if (scmGetCertResponseProto.hasX509RootCACertificate()) { - caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate()); - } - return caCertPemList; - } else { - Collection scmNodes = SCMHAUtils.getSCMNodeIds(configuration); - int expectedCount = scmNodes.size() + 1; - if (scmNodes.size() > 1) { - return getCAListWithRetry(() -> waitForCACerts( - scmSecurityProtocolClient::listCACertificate, - expectedCount), waitDuration); - } else { - return scmSecurityProtocolClient.listCACertificate(); - } - } - } - } - - private static List generateCAList(CertificateClient certClient) - throws IOException { - List caCertPemList = new ArrayList<>(); - for (X509Certificate cert : certClient.getAllRootCaCerts()) { - caCertPemList.add(CertificateCodec.getPEMEncodedString(cert)); - } - for (X509Certificate cert : certClient.getAllCaCerts()) { - caCertPemList.add(CertificateCodec.getPEMEncodedString(cert)); - } - return caCertPemList; - } - - /** * Retry forever until CA list matches expected count. * @param task - task to get CA list. @@ -488,23 +412,37 @@ private static List waitForCACerts( * Build CA List in the format of X509Certificate. * If certificate client is null, obtain the list of CA using SCM * security client, else it uses certificate client. + * * @return list of CA X509Certificates. */ - public static List buildCAX509List( - CertificateClient certClient, - ConfigurationSource conf) throws IOException { - if (certClient != null) { - // Do this here to avoid extra conversion of X509 to pem and again to - // X509 by buildCAList. 
- if (!SCMHAUtils.isSCMHAEnabled(conf)) { - List x509Certificates = new ArrayList<>(); - x509Certificates.addAll(certClient.getAllCaCerts()); - x509Certificates.addAll(certClient.getAllRootCaCerts()); - return x509Certificates; + public static List buildCAX509List(ConfigurationSource conf) throws IOException { + long waitDuration = + conf.getTimeDuration(OZONE_SCM_CA_LIST_RETRY_INTERVAL, + OZONE_SCM_CA_LIST_RETRY_INTERVAL_DEFAULT, TimeUnit.SECONDS); + Collection scmNodes = SCMHAUtils.getSCMNodeIds(conf); + SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient = + HddsServerUtil.getScmSecurityClient(conf); + if (!SCMHAUtils.isSCMHAEnabled(conf)) { + List caCertPemList = new ArrayList<>(); + SCMGetCertResponseProto scmGetCertResponseProto = + scmSecurityProtocolClient.getCACert(); + if (scmGetCertResponseProto.hasX509Certificate()) { + caCertPemList.add(scmGetCertResponseProto.getX509Certificate()); + } + if (scmGetCertResponseProto.hasX509RootCACertificate()) { + caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate()); + } + return OzoneSecurityUtil.convertToX509(caCertPemList); + } else { + int expectedCount = scmNodes.size() + 1; + if (scmNodes.size() > 1) { + return OzoneSecurityUtil.convertToX509(getCAListWithRetry(() -> waitForCACerts( + scmSecurityProtocolClient::listCACertificate, + expectedCount), waitDuration)); + } else { + return OzoneSecurityUtil.convertToX509(scmSecurityProtocolClient.listCACertificate()); } } - List pemEncodedCerts = HAUtils.buildCAList(certClient, conf); - return OzoneSecurityUtil.convertToX509(pemEncodedCerts); } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index c45e772c241..d80b6b3a272 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -125,11 +125,11 @@ private HddsServerUtil() { HddsServerUtil.class); /** - * Add protobuf-based protocol to the {@link RPC.Server}. + * Add protobuf-based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}. 
* @param conf configuration * @param protocol Protocol interface * @param service service that implements the protocol - * @param server RPC server to which the protocol & implementation is added to + * @param server RPC server to which the protocol and implementation is added to */ public static void addPBProtocol(Configuration conf, Class protocol, BlockingService service, RPC.Server server) throws IOException { @@ -742,9 +742,7 @@ public static String createStartupShutdownMessage(VersionInfo versionInfo, " version = " + versionInfo.getVersion(), " classpath = " + System.getProperty("java.class.path"), " build = " + versionInfo.getUrl() + "/" - + versionInfo.getRevision() - + " ; compiled by '" + versionInfo.getUser() - + "' on " + versionInfo.getDate(), + + versionInfo.getRevision(), " java = " + System.getProperty("java.version"), " conf = " + conf); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java index e7c4ec4ce3d..8387934261c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Objects; +import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.StringCodec; @@ -44,6 +45,7 @@ public final class TransactionInfo implements Comparable { StringCodec.get(), TransactionInfo::valueOf, TransactionInfo::toString, + TransactionInfo.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { @@ -162,7 +164,15 @@ public String toString() { */ public static TransactionInfo readTransactionInfo( DBStoreHAManager metadataManager) throws IOException { - return metadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY); + return metadataManager.getTransactionInfoTable().getSkipCache(TRANSACTION_INFO_KEY); + } + + public ByteString toByteString() throws IOException { + return ByteString.copyFrom(getCodec().toPersistedFormat(this)); + } + + public static TransactionInfo fromByteString(ByteString byteString) throws IOException { + return byteString == null ? 
null : getCodec().fromPersistedFormat(byteString.toByteArray()); } public SnapshotInfo toSnapshotInfo() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java index f62d3ac19cf..bb5eef70d25 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java @@ -33,6 +33,11 @@ private ByteArrayCodec() { // singleton } + @Override + public Class getTypeClass() { + return byte[].class; + } + @Override public byte[] toPersistedFormat(byte[] bytes) { return bytes; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java index 997bdf6cf2e..20e373317b1 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java @@ -34,6 +34,11 @@ public static ByteStringCodec get() { private ByteStringCodec() { } + @Override + public Class getTypeClass() { + return ByteString.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java index 653182214b6..a5268e6031c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java @@ -54,32 +54,21 @@ public class DBColumnFamilyDefinition { private final String tableName; - private final Class keyType; - private final Codec keyCodec; - private final Class valueType; - private final Codec valueCodec; - private ManagedColumnFamilyOptions cfOptions; + private volatile ManagedColumnFamilyOptions cfOptions; - public DBColumnFamilyDefinition( - String tableName, - Class keyType, - Codec keyCodec, - Class valueType, - Codec valueCodec) { + public DBColumnFamilyDefinition(String tableName, Codec keyCodec, Codec valueCodec) { this.tableName = tableName; - this.keyType = keyType; this.keyCodec = keyCodec; - this.valueType = valueType; this.valueCodec = valueCodec; this.cfOptions = null; } public Table getTable(DBStore db) throws IOException { - return db.getTable(tableName, keyType, valueType); + return db.getTable(tableName, getKeyType(), getValueType()); } public String getName() { @@ -87,7 +76,7 @@ public String getName() { } public Class getKeyType() { - return keyType; + return keyCodec.getTypeClass(); } public Codec getKeyCodec() { @@ -95,7 +84,7 @@ public Codec getKeyCodec() { } public Class getValueType() { - return valueType; + return valueCodec.getTypeClass(); } public Codec getValueCodec() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java index 968d62f0dd5..461bd35f413 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; 
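Since key and value classes are now derived from Codec#getTypeClass(), a column family is declared with its codecs alone, as the updated TestDBStoreBuilder later in this patch also does. A minimal sketch; the table name and types are examples, not part of the patch:

// Illustrative only: a column family declared with codecs, no explicit key/value classes.
import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
import org.apache.hadoop.hdds.utils.db.LongCodec;
import org.apache.hadoop.hdds.utils.db.StringCodec;

public final class SampleColumnFamilies {
  private SampleColumnFamilies() {
  }

  // Key/value types come from Codec#getTypeClass(), so the definition can no
  // longer disagree with its codecs.
  public static final DBColumnFamilyDefinition<String, Long> SAMPLE_TABLE =
      new DBColumnFamilyDefinition<>("sampleTable", StringCodec.get(), LongCodec.get());
}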
import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,9 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; /** * Simple interface to provide information to create a DBStore.. @@ -55,6 +59,16 @@ default File getDBLocation(ConfigurationSource conf) { getLocationConfigKey(), getName()); } + static List getColumnFamilyNames(Iterable> columnFamilies) { + return Collections.unmodifiableList(StreamSupport.stream(columnFamilies.spliterator(), false) + .map(DBColumnFamilyDefinition::getName) + .collect(Collectors.toList())); + } + + default List getColumnFamilyNames() { + return getColumnFamilyNames(getColumnFamilies()); + } + /** * @return The column families present in the DB. */ @@ -109,9 +123,17 @@ interface WithMapInterface extends DBDefinition { */ abstract class WithMap implements WithMapInterface { private final Map> map; + private final Supplier> columnFamilyNames; protected WithMap(Map> map) { this.map = map; + this.columnFamilyNames = MemoizedSupplier.valueOf( + () -> DBDefinition.getColumnFamilyNames(getColumnFamilies())); + } + + @Override + public final List getColumnFamilyNames() { + return columnFamilyNames.get(); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java index 3e8ea30a652..8623a3bdd7d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java @@ -184,7 +184,7 @@ void move(KEY sourceKey, KEY destKey, VALUE value, /** * Get List of Index to Table Names. * (For decoding table from column family index) - * @return Map of Index -> TableName + * @return Map of Index -> TableName */ Map getTableNames(); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index ed8d145b666..1e42241ee43 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -163,7 +163,8 @@ private DBStoreBuilder(ConfigurationSource configuration, OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT_DEFAULT, StorageUnit.BYTES); } - private void applyDBDefinition(DBDefinition definition) { + public static File getDBDirPath(DBDefinition definition, + ConfigurationSource configuration) { // Set metadata dirs. File metadataDir = definition.getDBLocation(configuration); @@ -174,6 +175,12 @@ private void applyDBDefinition(DBDefinition definition) { HddsConfigKeys.OZONE_METADATA_DIRS); metadataDir = getOzoneMetaDirPath(configuration); } + return metadataDir; + } + + private void applyDBDefinition(DBDefinition definition) { + // Set metadata dirs. 
+ File metadataDir = getDBDirPath(definition, configuration); setName(definition.getName()); setPath(Paths.get(metadataDir.getPath())); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java new file mode 100644 index 00000000000..9cc1695298c --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java @@ -0,0 +1,133 @@ +package org.apache.hadoop.hdds.utils.db; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.utils.MetadataKeyFilters; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * InMemory Table implementation for tests. + */ +public final class InMemoryTestTable implements Table { + private final Map map = new ConcurrentHashMap<>(); + + @Override + public void close() { + } + + @Override + public void put(KEY key, VALUE value) { + map.put(key, value); + } + + @Override + public void putWithBatch(BatchOperation batch, KEY key, VALUE value) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isEmpty() { + return map.isEmpty(); + } + + @Override + public boolean isExist(KEY key) { + return map.containsKey(key); + } + + @Override + public VALUE get(KEY key) { + return map.get(key); + } + + @Override + public VALUE getIfExist(KEY key) { + return map.get(key); + } + + @Override + public void delete(KEY key) { + map.remove(key); + } + + @Override + public void deleteWithBatch(BatchOperation batch, KEY key) { + throw new UnsupportedOperationException(); + } + + @Override + public void deleteRange(KEY beginKey, KEY endKey) { + throw new UnsupportedOperationException(); + } + + @Override + public TableIterator> iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public TableIterator> iterator(KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public String getName() { + return ""; + } + + @Override + public long getEstimatedKeyCount() { + return map.size(); + } + + @Override + public List> getRangeKVs(KEY startKey, int count, KEY prefix, + MetadataKeyFilters.MetadataKeyFilter... filters) + throws IOException, IllegalArgumentException { + throw new UnsupportedOperationException(); + } + + @Override + public List> getSequentialRangeKVs(KEY startKey, int count, KEY prefix, + MetadataKeyFilters.MetadataKeyFilter... 
filters) + throws IOException, IllegalArgumentException { + throw new UnsupportedOperationException(); + } + + @Override + public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public void dumpToFileWithPrefix(File externalFile, KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public void loadFromFile(File externalFile) { + throw new UnsupportedOperationException(); + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java index c47b176e93b..015cd10b8b9 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointUtils.java @@ -49,7 +49,7 @@ public static boolean waitForCheckpointDirectoryExist(File file, final boolean success = RatisHelper.attemptUntilTrue(file::exists, POLL_INTERVAL_DURATION, maxWaitTimeout); if (!success) { LOG.info("Checkpoint directory: {} didn't get created in {} secs.", - maxWaitTimeout.getSeconds(), file.getAbsolutePath()); + file.getAbsolutePath(), maxWaitTimeout.getSeconds()); } return success; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index c441ec929c7..945138b8b8b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -306,7 +306,7 @@ public void batchPut(ManagedWriteBatch writeBatch, ByteBuffer key, ByteBuffer value) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("batchPut buffer key {}", bytes2String(key.duplicate())); - LOG.debug("batchPut buffer value {}", bytes2String(value.duplicate())); + LOG.debug("batchPut buffer value size {}", value.remaining()); } try (UncheckedAutoCloseable ignored = acquire()) { @@ -841,7 +841,7 @@ private int getLastLevel() throws IOException { /** * Deletes sst files which do not correspond to prefix * for given table. - * @param prefixPairs, a map of TableName to prefixUsed. + * @param prefixPairs a map of TableName to prefixUsed. */ public void deleteFilesNotMatchingPrefix(Map prefixPairs) throws IOException, RocksDBException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java index c818c07b1ac..c7055267052 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java @@ -24,6 +24,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -169,7 +170,7 @@ default VALUE getReadCopy(KEY key) throws IOException { /** * Returns a prefixed iterator for this metadata store. 
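The new InMemoryTestTable gives unit tests a Table backed by a ConcurrentHashMap, so simple put/get/delete paths can be exercised without a RocksDB instance. A minimal JUnit 5 sketch; the test class and key names are illustrative:

// Illustrative only: exercising a Table-based code path without RocksDB.
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.apache.hadoop.hdds.utils.db.InMemoryTestTable;
import org.apache.hadoop.hdds.utils.db.Table;
import org.junit.jupiter.api.Test;

class TestInMemoryTableSketch {

  @Test
  void putGetDelete() throws Exception {
    Table<String, Long> table = new InMemoryTestTable<>();
    table.put("containerId", 42L);

    assertTrue(table.isExist("containerId"));
    assertEquals(42L, table.get("containerId"));

    table.delete("containerId");
    assertTrue(table.isEmpty());
  }
}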
* @param prefix - * @return + * @return MetaStoreIterator */ TableIterator> iterator(KEY prefix) throws IOException; @@ -245,7 +246,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { /** * Returns a certain range of key value pairs as a list based on a - * startKey or count. Further a {@link MetadataKeyFilters.MetadataKeyFilter} + * startKey or count. Further a {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter} * can be added to * filter keys if necessary. * To prevent race conditions while listing * entries, this implementation takes a snapshot and lists the entries from @@ -261,7 +262,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { * the value for count must be an integer greater than 0. *

    * This method allows to specify one or more - * {@link MetadataKeyFilters.MetadataKeyFilter} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter} * to filter keys by certain condition. Once given, only the entries * whose key passes all the filters will be included in the result. * @@ -269,7 +270,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { * @param count max number of entries to return. * @param prefix fixed key schema specific prefix * @param filters customized one or more - * {@link MetadataKeyFilters.MetadataKeyFilter}. + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}. * @return a list of entries found in the database or an empty list if the * startKey is invalid. * @throws IOException if there are I/O errors. @@ -292,7 +293,7 @@ List> getRangeKVs(KEY startKey, * @param count max number of entries to return. * @param prefix fixed key schema specific prefix * @param filters customized one or more - * {@link MetadataKeyFilters.MetadataKeyFilter}. + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}. * @return a list of entries found in the database. * @throws IOException * @throws IllegalArgumentException @@ -307,7 +308,6 @@ List> getSequentialRangeKVs(KEY startKey, * as part of a batch operation. * @param batch * @param prefix - * @return */ void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) throws IOException; @@ -354,6 +354,24 @@ public V getValue() { public String toString() { return "(key=" + key + ", value=" + value + ")"; } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof KeyValue)) { + return false; + } + KeyValue kv = (KeyValue) obj; + try { + return getKey().equals(kv.getKey()) && getValue().equals(kv.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public int hashCode() { + return Objects.hash(getKey(), getValue()); + } }; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java index 0c1ec710d2c..c428f2860ee 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java @@ -48,7 +48,7 @@ * This interface must be implemented by entities requiring audit logging. * For example - OMVolumeArgs, OMBucketArgs. * The implementing class must override toAuditMap() to return an - * instance of Map where both Key and Value are String. + * instance of {@code Map} where both Key and Value are String. 
* * Key: must contain printable US ASCII characters * May not contain a space, =, ], or " diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css index e08e9c52060..4988cc8eeb1 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css @@ -91,3 +91,26 @@ body { .om-roles-background { background-color: #dcfbcd!important; } + +.scm-roles-background { + background-color: #dcfbcd!important; +} +.toggle-btn { + background: transparent; /* No background color */ + color: #007bff; /* Button text color */ + border: none; /* No border */ + font-size: 12px; /* Font size for better readability */ + cursor: pointer; /* Pointer cursor on hover */ + padding: 5px 10px; /* Padding around the text */ + margin-bottom: 5px; /* Space below the button */ + transition: color 0.3s, transform 0.3s; /* Smooth transition for color and transform */ +} + +.toggle-btn:hover { + color: #0056b3; /* Darker color on hover */ + transform: scale(1.1); /* Slightly scale up the button on hover */ +} + +.toggle-btn:focus { + outline: none; /* Remove default focus outline */ +} diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js index a31078cfd7b..7bb93106284 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js @@ -48,8 +48,14 @@ }); angular.module('ozone').component('jvmParameters', { templateUrl: 'static/templates/jvm.html', - controller: function($http) { + controller: function($http, $scope) { var ctrl = this; + + $scope.contentVisible = false; + $scope.toggleContent = function() { + $scope.contentVisible = !$scope.contentVisible; + }; + $http.get("jmx?qry=java.lang:type=Runtime") .then(function(result) { ctrl.jmx = result.data.beans[0]; @@ -245,7 +251,11 @@ angular.module('ozone').component('navmenu', { bindings: { - metrics: '<' + metrics: '<', + iostatus: '<', + ioLinkHref: '@', + scanner: '<', + scannerLinkHref: '@', }, templateUrl: 'static/templates/menu.html', controller: function($http) { diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html index c1f7d16aefa..c562ae7d9a2 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html @@ -20,7 +20,16 @@ {{$ctrl.jmx.SystemProperties.java_vm_name}} {{$ctrl.jmx.SystemProperties.java_vm_version}} - Input arguments: - {{$ctrl.jmx.InputArguments}} + + Input arguments: + + + +

    +
    {{$ctrl.jmx.InputArguments.join('\n')}}
    +
    + diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html index 95f1b4842f1..9a14f356d7a 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html @@ -56,5 +56,7 @@ aria-hidden="true">
+
  • IO Status
  • +
  • Data Scanner
  • diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html index 30e2d26f56f..2811e8c36a5 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html @@ -14,9 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. --> -

    Overview

    +

    Overview ({{$ctrl.jmx.Hostname}})

    + + + + diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java index fa784b75538..94ef86650c4 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java @@ -48,6 +48,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; @@ -257,16 +258,6 @@ public Set getAllCaCerts() { return rootCerts; } - @Override - public List getCAList() { - return null; - } - - @Override - public List updateCAList() throws IOException { - return null; - } - public void renewRootCA() throws Exception { LocalDateTime start = LocalDateTime.now(); Duration rootCACertDuration = securityConfig.getMaxCertificateDuration(); @@ -364,6 +355,17 @@ public ReloadingX509TrustManager getTrustManager() throws CertificateException { } } + @Override + public ClientTrustManager createClientTrustManager() throws IOException { + CACertificateProvider caCertificateProvider = () -> { + List caCerts = new ArrayList<>(); + caCerts.addAll(getAllCaCerts()); + caCerts.addAll(getAllRootCaCerts()); + return caCerts; + }; + return new ClientTrustManager(caCertificateProvider, caCertificateProvider); + } + @Override public void registerNotificationReceiver(CertificateNotification receiver) { synchronized (notificationReceivers) { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java new file mode 100644 index 00000000000..47a90d05df7 --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.hdds.server; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.util.Arrays; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This class is to test the utilities present in the OzoneAdmins class. + */ +class TestOzoneAdmins { + // The following set of tests are to validate the S3 based utilities present in OzoneAdmins + private OzoneConfiguration configuration; + + @BeforeEach + void setUp() { + configuration = new OzoneConfiguration(); + } + + @ParameterizedTest + @ValueSource(strings = {OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, + OzoneConfigKeys.OZONE_ADMINISTRATORS}) + void testS3AdminExtraction(String configKey) throws IOException { + configuration.set(configKey, "alice,bob"); + + assertThat(OzoneAdmins.getS3AdminsFromConfig(configuration)) + .containsAll(Arrays.asList("alice", "bob")); + } + + @ParameterizedTest + @ValueSource(strings = {OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, + OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS}) + void testS3AdminGroupExtraction(String configKey) { + configuration.set(configKey, "test1, test2"); + + assertThat(OzoneAdmins.getS3AdminsGroupsFromConfig(configuration)) + .containsAll(Arrays.asList("test1", "test2")); + } + + @ParameterizedTest + @CsvSource({ + OzoneConfigKeys.OZONE_ADMINISTRATORS + ", " + OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, + OzoneConfigKeys.OZONE_S3_ADMINISTRATORS + ", " + OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS + }) + void testIsAdmin(String adminKey, String adminGroupKey) { + // When there is no S3 admin, but Ozone admins present + configuration.set(adminKey, "alice"); + configuration.set(adminGroupKey, "test_group"); + + OzoneAdmins admins = OzoneAdmins.getS3Admins(configuration); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + + assertThat(admins.isAdmin(ugi)).isEqualTo(true); + + // Test that when a user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + assertThat(admins.isAdmin(ugiGroupOnly)).isEqualTo(true); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testIsAdminWithUgi(boolean isAdminSet) { + if (isAdminSet) { + configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, "alice"); + configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, "test_group"); + } + OzoneAdmins admins = OzoneAdmins.getS3Admins(configuration); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + // Test that when a user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + + 
assertThat(admins.isAdmin(ugi)).isEqualTo(isAdminSet); + assertThat(admins.isAdmin(ugiGroupOnly)).isEqualTo(isAdminSet); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testIsS3AdminWithUgiAndConfiguration(boolean isAdminSet) { + if (isAdminSet) { + configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, "alice"); + configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, "test_group"); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + // Scenario when user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + + assertThat(OzoneAdmins.isS3Admin(ugi, configuration)).isEqualTo(true); + assertThat(OzoneAdmins.isS3Admin(ugiGroupOnly, configuration)).isEqualTo(true); + } else { + assertThat(OzoneAdmins.isS3Admin(null, configuration)).isEqualTo(false); + } + + } +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java index aad3e9e12e6..7966afe5045 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java @@ -179,7 +179,7 @@ public void builderWithColumnFamilyOptions(@TempDir Path tempDir) String sampleTableName = "sampleTable"; final DBColumnFamilyDefinition sampleTable = new DBColumnFamilyDefinition<>(sampleTableName, - String.class, StringCodec.get(), Long.class, LongCodec.get()); + StringCodec.get(), LongCodec.get()); final DBDefinition sampleDB = new DBDefinition.WithMap( DBColumnFamilyDefinition.newUnmodifiableMap(sampleTable)) { { @@ -250,8 +250,8 @@ public void testIfAutoCompactionDisabled(boolean disableAutoCompaction, String sampleTableName = "sampleTable"; final DBColumnFamilyDefinition sampleTable = - new DBColumnFamilyDefinition<>(sampleTableName, String.class, - StringCodec.get(), Long.class, LongCodec.get()); + new DBColumnFamilyDefinition<>(sampleTableName, + StringCodec.get(), LongCodec.get()); final DBDefinition sampleDB = new DBDefinition.WithMap( DBColumnFamilyDefinition.newUnmodifiableMap(sampleTable)) { @Override diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 03b677e3818..7676f1f45f1 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop client dependencies Apache Ozone HDDS Hadoop Client dependencies @@ -51,10 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.xerial.snappy snappy-java - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - org.apache.hadoop hadoop-annotations @@ -63,10 +59,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.guava guava - - commons-cli - commons-cli - + org.apache.commons commons-math3 @@ -210,10 +203,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - com.nimbusds nimbus-jose-jwt @@ -224,10 +213,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${hadoop.version} compile - - org.apache.hadoop.thirdparty - 
hadoop-shaded-guava - com.google.guava guava diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 69daeac4bd7..6be31002b09 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-server - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop server dependencies Apache Ozone HDDS Hadoop Server dependencies @@ -51,10 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.xerial.snappy snappy-java - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - org.apache.curator * @@ -148,10 +144,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - com.nimbusds nimbus-jose-jwt @@ -171,10 +163,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.sun.jersey * - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - io.netty * diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index 80ec91cd6d9..f04e45a0340 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop test dependencies Apache Ozone HDDS Hadoop Test dependencies diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml index 9230b02b524..f3197dc8965 100644 --- a/hadoop-hdds/interface-admin/pom.xml +++ b/hadoop-hdds/interface-admin/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-interface-admin - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Admin interface Apache Ozone HDDS Admin Interface @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true @@ -80,14 +81,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index dd4350ae499..ee187bfdc5d 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -299,6 +299,7 @@ message SCMListContainerRequestProto { message SCMListContainerResponseProto { repeated ContainerInfoProto containers = 1; + optional int64 containerCount = 2; } message SCMDeleteContainerRequestProto { @@ -619,30 +620,31 @@ message ContainerBalancerStatusInfoRequestProto { message ContainerBalancerStatusInfoResponseProto { optional bool isRunning = 1; - optional ContainerBalancerStatusInfo containerBalancerStatusInfo = 2; + optional ContainerBalancerStatusInfoProto containerBalancerStatusInfo = 2; } -message ContainerBalancerStatusInfo { +message ContainerBalancerStatusInfoProto { optional uint64 startedAt = 1; optional ContainerBalancerConfigurationProto configuration = 2; - repeated ContainerBalancerTaskIterationStatusInfo iterationsStatusInfo = 3; + repeated ContainerBalancerTaskIterationStatusInfoProto iterationsStatusInfo = 3; } -message 
ContainerBalancerTaskIterationStatusInfo { +message ContainerBalancerTaskIterationStatusInfoProto { optional int32 iterationNumber = 1; optional string iterationResult = 2; - optional int64 sizeScheduledForMoveGB = 3; - optional int64 dataSizeMovedGB = 4; + optional int64 sizeScheduledForMove = 3; + optional int64 dataSizeMoved = 4; optional int64 containerMovesScheduled = 5; optional int64 containerMovesCompleted = 6; optional int64 containerMovesFailed = 7; optional int64 containerMovesTimeout = 8; - repeated NodeTransferInfo sizeEnteringNodesGB = 9; - repeated NodeTransferInfo sizeLeavingNodesGB = 10; + repeated NodeTransferInfoProto sizeEnteringNodes = 9; + repeated NodeTransferInfoProto sizeLeavingNodes = 10; + optional int64 iterationDuration = 11; } -message NodeTransferInfo { +message NodeTransferInfoProto { optional string uuid = 1; - optional int64 dataVolumeGB = 2; + optional int64 dataVolume = 2; } message DecommissionScmRequestProto { diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index 98cfc53f5e8..1a61dfa930e 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-interface-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Client interface Apache Ozone HDDS Client Interface @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true @@ -40,7 +41,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop.thirdparty - hadoop-shaded-protobuf_3_7 + hadoop-shaded-protobuf_3_25 org.apache.ratis @@ -176,13 +177,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index ee5c0d9cc5a..cb4862cb6f3 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -61,7 +61,7 @@ message ExtendedDatanodeDetailsProto { optional string version = 2; optional int64 setupTime = 3; optional string revision = 4; - optional string buildDate = 5; + optional string buildDate = 5; // unused, reserved for compatibility } message MoveDataNodePairProto { @@ -191,6 +191,7 @@ message DatanodeUsageInfoProto { optional int64 containerCount = 5; optional int64 committed = 6; optional int64 freeSpaceToSpare = 7; + optional int64 pipelineCount = 8; } /** @@ -256,6 +257,7 @@ message GetScmInfoResponseProto { required string clusterId = 1; required string scmId = 2; repeated string peerRoles = 3; + optional bool scmRatisEnabled = 4; } message AddScmRequestProto { diff --git a/hadoop-hdds/interface-client/src/main/resources/proto.lock b/hadoop-hdds/interface-client/src/main/resources/proto.lock index 1f3f552a4d1..e59b77b93d4 100644 --- a/hadoop-hdds/interface-client/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-client/src/main/resources/proto.lock @@ -2433,6 +2433,18 @@ "name": "containerCount", "type": "int64", "optional": true + }, + { + "id": 6, + "name": "committed", + "type": "int64", + "optional": true + }, + { + "id": 7, + "name": "freeSpaceToSpare", + "type": "int64", + "optional": true } ] }, diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index df65c1e2b2a..47bde5a0bc7 100644 --- 
a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-interface-server - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Server interface Apache Ozone HDDS Server Interface @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true @@ -142,13 +143,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - diff --git a/hadoop-hdds/interface-server/src/main/resources/proto.lock b/hadoop-hdds/interface-server/src/main/resources/proto.lock index 6966915f4a2..bb5748eab29 100644 --- a/hadoop-hdds/interface-server/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-server/src/main/resources/proto.lock @@ -1427,6 +1427,30 @@ "value": "false" } ] + }, + { + "id": 8, + "name": "committed", + "type": "uint64", + "optional": true, + "options": [ + { + "name": "default", + "value": "0" + } + ] + }, + { + "id": 9, + "name": "freeSpaceToSpare", + "type": "uint64", + "optional": true, + "options": [ + { + "name": "default", + "value": "0" + } + ] } ] }, diff --git a/hadoop-hdds/managed-rocksdb/pom.xml b/hadoop-hdds/managed-rocksdb/pom.xml index 125783222e5..40ad920647a 100644 --- a/hadoop-hdds/managed-rocksdb/pom.xml +++ b/hadoop-hdds/managed-rocksdb/pom.xml @@ -19,10 +19,10 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-managed-rocksdb - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Managed RocksDB library Apache Ozone HDDS Managed RocksDB jar diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java index 5a5a577351b..ead43e9aaf8 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hdds.utils.db.managed; +import org.apache.commons.io.FilenameUtils; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.DBOptions; @@ -31,6 +32,8 @@ import java.io.IOException; import java.time.Duration; import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; /** * Managed {@link RocksDB}. 
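The hunk below introduces ManagedRocksDB#getLiveMetadataForSSTFiles, which indexes the DB's live-file metadata by SST base name so that later changes in this patch can look up column family and key range without opening each SST file with a reader. A minimal usage sketch, assuming the returned map is keyed by the file name without its ".sst" extension and valued with LiveFileMetaData (the helper class and method names below are illustrative, not part of the patch):

    import java.util.Map;
    import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
    import org.rocksdb.LiveFileMetaData;

    final class SstMetadataLookup {
      private SstMetadataLookup() { }

      // Returns metadata for one SST, or null if the file is no longer in the DB's live set.
      static LiveFileMetaData describe(ManagedRocksDB db, String sstBaseName) {
        Map<String, LiveFileMetaData> live = db.getLiveMetadataForSSTFiles();
        return live.get(sstBaseName);
      }
    }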
@@ -102,4 +105,14 @@ public void deleteFile(LiveFileMetaData fileToBeDeleted) File file = new File(fileToBeDeleted.path(), fileToBeDeleted.fileName()); ManagedRocksObjectUtils.waitForFileDelete(file, Duration.ofSeconds(60)); } + + public static Map getLiveMetadataForSSTFiles(RocksDB db) { + return db.getLiveFilesMetaData().stream().collect( + Collectors.toMap(liveFileMetaData -> FilenameUtils.getBaseName(liveFileMetaData.fileName()), + liveFileMetaData -> liveFileMetaData)); + } + + public Map getLiveMetadataForSSTFiles() { + return getLiveMetadataForSSTFiles(this.get()); + } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java index 148abee7fc0..d58f70495fe 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java @@ -55,12 +55,7 @@ static UncheckedAutoCloseable track(AutoCloseable object) { static void reportLeak(Class clazz, String stackTrace) { ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject(); - String warning = String.format("%s is not closed properly", clazz.getSimpleName()); - if (stackTrace != null && LOG.isDebugEnabled()) { - String debugMessage = String.format("%nStackTrace for unclosed instance: %s", stackTrace); - warning = warning.concat(debugMessage); - } - LOG.warn(warning); + HddsUtils.reportLeak(clazz, stackTrace, LOG); } private static @Nullable StackTraceElement[] getStackTrace() { diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index a863fe3ef5d..b3aa6ff6952 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -20,15 +20,19 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone-main - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Project Apache Ozone HDDS pom + + true + + annotations hadoop-dependency-client @@ -54,18 +58,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> rocks-native - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - @@ -247,6 +239,30 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + org.apache.ozone:ozone-dev-support:${ozone.version} + + + + + org.apache.ozone + ozone-dev-support + ${ozone.version} + + + + + + process + + + + @@ -298,36 +314,5 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - add-classpath-descriptor - - - src/main/java - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - add-classpath-descriptor - prepare-package - - build-classpath - - - ${project.build.outputDirectory}/${project.artifactId}.classpath - $HDDS_LIB_JARS_DIR - true - runtime - - - - - - - diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index 5fc9949514b..4c751e0b10a 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -18,7 +18,7 @@ hdds org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 4.0.0 Apache Ozone HDDS RocksDB Tools @@ -385,7 +385,7 @@ maven-dependency-plugin - copy-jars + copy-dependencies process-sources copy-dependencies diff --git 
a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java index ce424c930e1..08e397d0683 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.ozone.util.ShutdownHookManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.List; @@ -36,6 +38,8 @@ import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; + /** * Class to load Native Libraries. */ @@ -67,6 +71,10 @@ public static NativeLibraryLoader getInstance() { return instance; } + public static String getJniLibraryFileName() { + return appendLibOsSuffix("lib" + ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + } + public static String getJniLibraryFileName(String libraryName) { return appendLibOsSuffix("lib" + libraryName); } @@ -99,9 +107,12 @@ private static String appendLibOsSuffix(String libraryFileName) { return libraryFileName + getLibOsSuffix(); } + public static boolean isLibraryLoaded() { + return isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + } + public static boolean isLibraryLoaded(final String libraryName) { - return getInstance().librariesLoaded - .getOrDefault(libraryName, false); + return getInstance().librariesLoaded.getOrDefault(libraryName, false); } public synchronized boolean loadLibrary(final String libraryName, final List dependentFiles) { @@ -161,31 +172,38 @@ private Pair, List> copyResourceFromJarToTemp(final String getSystemProperty(NATIVE_LIB_TMP_DIR) : ""; final File dir = new File(nativeLibDir).getAbsoluteFile(); - // create a temporary file to copy the library to - final File temp = File.createTempFile(libraryName, getLibOsSuffix(), dir); - if (!temp.exists()) { + // create a temporary dir to copy the library to + final Path tempPath = Files.createTempDirectory(dir.toPath(), libraryName); + final File tempDir = tempPath.toFile(); + if (!tempDir.exists()) { return Pair.of(Optional.empty(), null); - } else { - temp.deleteOnExit(); } - Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING); + Path libPath = tempPath.resolve(libraryFileName); + Files.copy(is, libPath, StandardCopyOption.REPLACE_EXISTING); + File libFile = libPath.toFile(); + if (libFile.exists()) { + libFile.deleteOnExit(); + } + List dependentFiles = new ArrayList<>(); for (String fileName : dependentFileNames) { if (is != null) { is.close(); } is = getResourceStream(fileName); - File file = new File(dir, fileName); - Files.copy(is, file.toPath(), StandardCopyOption.REPLACE_EXISTING); + Path path = tempPath.resolve(fileName); + Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING); + File file = path.toFile(); if (file.exists()) { file.deleteOnExit(); } dependentFiles.add(file); } - ShutdownHookManager.get().addShutdownHook(temp::delete, + ShutdownHookManager.get().addShutdownHook( + () -> FileUtil.fullyDelete(tempDir), LIBRARY_SHUTDOWN_HOOK_PRIORITY); 
- return Pair.of(Optional.of(temp), dependentFiles); + return Pair.of(Optional.of(libFile), dependentFiles); } finally { if (is != null) { is.close(); diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index f0074e0a1ac..6e1622ebd7c 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.utils; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.ozone.test.tag.Native; import org.junit.jupiter.api.io.TempDir; @@ -28,15 +29,16 @@ import java.io.ByteArrayInputStream; import java.io.File; import java.nio.file.Path; -import java.util.Collections; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Stream; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; import static org.apache.hadoop.hdds.utils.NativeLibraryLoader.NATIVE_LIB_TMP_DIR; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.apache.hadoop.hdds.utils.NativeLibraryLoader.getJniLibraryFileName; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.anyString; @@ -68,21 +70,45 @@ public void testNativeLibraryLoader(String nativeLibraryDirectoryLocation) throw mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()).thenReturn(loader); ManagedRawSSTFileReader.loadLibrary(); assertTrue(NativeLibraryLoader.isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + } + } + + @ParameterizedTest + @MethodSource("nativeLibraryDirectoryLocations") + public void testDummyLibrary(String nativeLibraryDirectoryLocation) { + Map libraryLoadedMap = new HashMap<>(); + NativeLibraryLoader loader = new NativeLibraryLoader(libraryLoadedMap); + try (MockedStatic mockedNativeLibraryLoader = mockStatic(NativeLibraryLoader.class, + CALLS_REAL_METHODS)) { + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) + .thenReturn(nativeLibraryDirectoryLocation); + mockedNativeLibraryLoader.when(NativeLibraryLoader::getInstance).thenReturn(loader); // Mocking to force copy random bytes to create a lib file to // nativeLibraryDirectoryLocation. But load library will fail. mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getResourceStream(anyString())) .thenReturn(new ByteArrayInputStream(new byte[]{0, 1, 2, 3})); String dummyLibraryName = "dummy_lib"; - NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName, Collections.emptyList()); - NativeLibraryLoader.isLibraryLoaded(dummyLibraryName); + List dependencies = Arrays.asList("dep1", "dep2"); + File absDir = new File(nativeLibraryDirectoryLocation == null ? "" : nativeLibraryDirectoryLocation) + .getAbsoluteFile(); + + NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName, dependencies); + // Checking if the resource with random was copied to a temp file. - File[] libPath = new File(nativeLibraryDirectoryLocation == null ? 
"" : nativeLibraryDirectoryLocation) - .getAbsoluteFile().listFiles((dir, name) -> name.startsWith(dummyLibraryName) && - name.endsWith(NativeLibraryLoader.getLibOsSuffix())); - assertNotNull(libPath); - assertEquals(1, libPath.length); - assertTrue(libPath[0].delete()); + File[] libPath = absDir + .listFiles((dir, name) -> name.startsWith(dummyLibraryName)); + assertThat(libPath) + .isNotNull() + .isNotEmpty(); + assertThat(libPath[0]) + .isDirectory(); + try { + assertThat(new File(libPath[0], getJniLibraryFileName(dummyLibraryName))) + .isFile(); + dependencies.forEach(dep -> assertThat(new File(libPath[0], dep)).isFile()); + } finally { + FileUtil.fullyDelete(libPath[0]); + } } - } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index 3e535c5f5f2..c4284a4e85d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT rocksdb-checkpoint-differ - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT RocksDB Checkpoint Differ RocksDB Checkpoint Differ jar diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java index fa0d1f5491d..2d67d5003ae 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java @@ -20,7 +20,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.rocksdb.LiveFileMetaData; import java.util.Objects; @@ -128,6 +130,16 @@ public Builder setColumnFamily(String columnFamily) { return this; } + public Builder setValues(LiveFileMetaData fileMetaData) { + if (fileMetaData != null) { + String columnFamilyName = StringUtils.bytes2String(fileMetaData.columnFamilyName()); + String startRangeValue = StringUtils.bytes2String(fileMetaData.smallestKey()); + String endRangeValue = StringUtils.bytes2String(fileMetaData.largestKey()); + this.setColumnFamily(columnFamilyName).setStartRange(startRangeValue).setEndRange(endRangeValue); + } + return this; + } + public CompactionFileInfo build() { if ((startRange != null || endRange != null || columnFamily != null) && (startRange == null || endRange == null || columnFamily == null)) { diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java index c27763b9788..04980821ba9 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java @@ -38,7 +38,8 @@ public final class CompactionLogEntry implements private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(CompactionLogEntryProto.getDefaultInstance()), CompactionLogEntry::getFromProtobuf, - CompactionLogEntry::getProtobuf); + CompactionLogEntry::getProtobuf, + CompactionLogEntry.class); public static Codec getCodec() { return 
CODEC; diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java index f8133e6b92f..45a21970966 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java @@ -17,6 +17,8 @@ */ package org.apache.ozone.rocksdiff; +import org.apache.ozone.compaction.log.CompactionFileInfo; + /** * Node in the compaction DAG that represents an SST file. */ @@ -48,6 +50,11 @@ public CompactionNode(String file, long numKeys, long seqNum, this.columnFamily = columnFamily; } + public CompactionNode(CompactionFileInfo compactionFileInfo) { + this(compactionFileInfo.getFileName(), -1, -1, compactionFileInfo.getStartKey(), + compactionFileInfo.getEndKey(), compactionFileInfo.getColumnFamily()); + } + @Override public String toString() { return String.format("Node{%s}", fileName); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index 08a013fc7c7..930c2a269b5 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -26,14 +26,16 @@ import java.io.FileNotFoundException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; +import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -42,11 +44,9 @@ import org.apache.hadoop.hdds.utils.Scheduler; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; import org.apache.ozone.rocksdb.util.RdbUtil; @@ -74,7 +74,6 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -174,6 +173,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, private ColumnFamilyHandle compactionLogTableCFHandle; private ManagedRocksDB activeRocksDB; + private ConcurrentMap inflightCompactions; /** * For snapshot diff calculation we only need to track following column @@ -245,6 +245,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, } else { this.scheduler 
= null; } + this.inflightCompactions = new ConcurrentHashMap<>(); } private String createCompactionLogDir(String metadataDirName, @@ -463,7 +464,7 @@ public void onCompactionBegin(RocksDB db, return; } } - + inflightCompactions.putAll(toFileInfoList(compactionJobInfo.inputFiles(), db)); for (String file : compactionJobInfo.inputFiles()) { createLink(Paths.get(sstBackupDir, new File(file).getName()), Paths.get(file)); @@ -484,17 +485,21 @@ public void onCompactionCompleted(RocksDB db, } long trxId = db.getLatestSequenceNumber(); - + Map inputFileCompactions = toFileInfoList(compactionJobInfo.inputFiles(), db); CompactionLogEntry.Builder builder; - try (ManagedOptions options = new ManagedOptions(); - ManagedReadOptions readOptions = new ManagedReadOptions()) { - builder = new CompactionLogEntry.Builder(trxId, - System.currentTimeMillis(), - toFileInfoList(compactionJobInfo.inputFiles(), options, - readOptions), - toFileInfoList(compactionJobInfo.outputFiles(), options, - readOptions)); - } + builder = new CompactionLogEntry.Builder(trxId, + System.currentTimeMillis(), + inputFileCompactions.keySet().stream() + .map(inputFile -> { + if (!inflightCompactions.containsKey(inputFile)) { + LOG.warn("Input file not found in inflightCompactionsMap : {} which should have been added on " + + "compactionBeginListener.", + inputFile); + } + return inflightCompactions.getOrDefault(inputFile, inputFileCompactions.get(inputFile)); + }) + .collect(Collectors.toList()), + new ArrayList<>(toFileInfoList(compactionJobInfo.outputFiles(), db).values())); if (LOG.isDebugEnabled()) { builder = builder.setCompactionReason( @@ -502,7 +507,6 @@ public void onCompactionCompleted(RocksDB db, } CompactionLogEntry compactionLogEntry = builder.build(); - synchronized (this) { if (closed) { return; @@ -521,6 +525,9 @@ public void onCompactionCompleted(RocksDB db, populateCompactionDAG(compactionLogEntry.getInputFileInfoList(), compactionLogEntry.getOutputFileInfoList(), compactionLogEntry.getDbSequenceNumber()); + for (String inputFile : inputFileCompactions.keySet()) { + inflightCompactions.remove(inputFile); + } } } }; @@ -640,7 +647,7 @@ private String trimSSTFilename(String filename) { * @param rocksDB open rocksDB instance. * @return a list of SST files (without extension) in the DB. */ - public HashSet readRocksDBLiveFiles(ManagedRocksDB rocksDB) { + public Set readRocksDBLiveFiles(ManagedRocksDB rocksDB) { HashSet liveFiles = new HashSet<>(); final List cfs = Arrays.asList( @@ -789,7 +796,7 @@ private void preconditionChecksForLoadAllCompactionLogs() { * and appends the extension '.sst'. */ private String getSSTFullPath(String sstFilenameWithoutExtension, - String dbPath) { + String... dbPaths) { // Try to locate the SST in the backup dir first final Path sstPathInBackupDir = Paths.get(sstBackupDir, @@ -800,11 +807,13 @@ private String getSSTFullPath(String sstFilenameWithoutExtension, // SST file does not exist in the SST backup dir, this means the SST file // has not gone through any compactions yet and is only available in the - // src DB directory - final Path sstPathInDBDir = Paths.get(dbPath, - sstFilenameWithoutExtension + SST_FILE_EXTENSION); - if (Files.exists(sstPathInDBDir)) { - return sstPathInDBDir.toString(); + // src DB directory or destDB directory + for (String dbPath : dbPaths) { + final Path sstPathInDBDir = Paths.get(dbPath, + sstFilenameWithoutExtension + SST_FILE_EXTENSION); + if (Files.exists(sstPathInDBDir)) { + return sstPathInDBDir.toString(); + } } // TODO: More graceful error handling? 
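Taken together, the listener changes above capture each input SST's metadata in inflightCompactions at compaction begin, while the file is still live, and onCompactionCompleted prefers that snapshot because the inputs may already have been dropped from the live set by the time it runs. A self-contained sketch of that capture-at-begin / consume-at-complete pattern (all names below are illustrative only):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class InflightCompactionTracker {
      // input file path -> metadata captured while the file was still live
      private final Map<String, String> inflight = new ConcurrentHashMap<>();

      void onCompactionBegin(String inputFile, String metadataWhileLive) {
        inflight.put(inputFile, metadataWhileLive);
      }

      // Prefer the begin-time snapshot; fall back to whatever is still resolvable now.
      String onCompactionCompleted(String inputFile, String resolvableNow) {
        String captured = inflight.remove(inputFile);
        return captured != null ? captured : resolvableNow;
      }
    }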
@@ -825,25 +834,23 @@ private String getSSTFullPath(String sstFilenameWithoutExtension, * e.g. ["/path/to/sstBackupDir/000050.sst", * "/path/to/sstBackupDir/000060.sst"] */ - public synchronized List getSSTDiffListWithFullPath( - DifferSnapshotInfo src, - DifferSnapshotInfo dest, - String sstFilesDirForSnapDiffJob - ) throws IOException { + public synchronized Optional> getSSTDiffListWithFullPath(DifferSnapshotInfo src, + DifferSnapshotInfo dest, + String sstFilesDirForSnapDiffJob) { - List sstDiffList = getSSTDiffList(src, dest); + Optional> sstDiffList = getSSTDiffList(src, dest); - return sstDiffList.stream() + return sstDiffList.map(diffList -> diffList.stream() .map( sst -> { - String sstFullPath = getSSTFullPath(sst, src.getDbPath()); + String sstFullPath = getSSTFullPath(sst, src.getDbPath(), dest.getDbPath()); Path link = Paths.get(sstFilesDirForSnapDiffJob, sst + SST_FILE_EXTENSION); Path srcFile = Paths.get(sstFullPath); createLink(link, srcFile); return link.toString(); }) - .collect(Collectors.toList()); + .collect(Collectors.toList())); } /** @@ -857,10 +864,8 @@ public synchronized List getSSTDiffListWithFullPath( * @param dest destination snapshot * @return A list of SST files without extension. e.g. ["000050", "000060"] */ - public synchronized List getSSTDiffList( - DifferSnapshotInfo src, - DifferSnapshotInfo dest - ) throws IOException { + public synchronized Optional> getSSTDiffList(DifferSnapshotInfo src, + DifferSnapshotInfo dest) { // TODO: Reject or swap if dest is taken after src, once snapshot chain // integration is done. @@ -894,29 +899,18 @@ public synchronized List getSSTDiffList( LOG.debug("{}", logSB); } - if (src.getTablePrefixes() != null && !src.getTablePrefixes().isEmpty()) { - filterRelevantSstFilesFullPath(fwdDAGDifferentFiles, - src.getTablePrefixes()); + // Check if the DAG traversal was able to reach all the destination SST files. + for (String destSnapFile : destSnapFiles) { + if (!fwdDAGSameFiles.contains(destSnapFile) && !fwdDAGDifferentFiles.contains(destSnapFile)) { + return Optional.empty(); + } } - return new ArrayList<>(fwdDAGDifferentFiles); - } - - /** - * construct absolute sst file path first and - * filter the files. - */ - public void filterRelevantSstFilesFullPath(Set inputFiles, - Map tableToPrefixMap) throws IOException { - for (Iterator fileIterator = - inputFiles.iterator(); fileIterator.hasNext();) { - String filename = fileIterator.next(); - String filepath = getAbsoluteSstFilePath(filename); - if (!RocksDiffUtils.doesSstFileContainKeyRange(filepath, - tableToPrefixMap)) { - fileIterator.remove(); - } + if (src.getTablePrefixes() != null && !src.getTablePrefixes().isEmpty()) { + RocksDiffUtils.filterRelevantSstFiles(fwdDAGDifferentFiles, src.getTablePrefixes(), compactionNodeMap, + src.getRocksDB(), dest.getRocksDB()); } + return Optional.of(new ArrayList<>(fwdDAGDifferentFiles)); } /** @@ -939,10 +933,6 @@ synchronized void internalGetSSTDiffList( Preconditions.checkArgument(sameFiles.isEmpty(), "Set must be empty"); Preconditions.checkArgument(differentFiles.isEmpty(), "Set must be empty"); - // Use source snapshot's table prefix. At this point Source and target's - // table prefix should be same. 
- Map columnFamilyToPrefixMap = src.getTablePrefixes(); - for (String fileName : srcSnapFiles) { if (destSnapFiles.contains(fileName)) { LOG.debug("Source '{}' and destination '{}' share the same SST '{}'", @@ -1006,15 +996,6 @@ synchronized void internalGetSSTDiffList( } for (CompactionNode nextNode : successors) { - if (shouldSkipNode(nextNode, columnFamilyToPrefixMap)) { - LOG.debug("Skipping next node: '{}' with startKey: '{}' and " + - "endKey: '{}' because it doesn't have keys related to " + - "columnFamilyToPrefixMap: '{}'.", - nextNode.getFileName(), nextNode.getStartKey(), - nextNode.getEndKey(), columnFamilyToPrefixMap); - continue; - } - if (sameFiles.contains(nextNode.getFileName()) || differentFiles.contains(nextNode.getFileName())) { LOG.debug("Skipping known processed SST: {}", @@ -1485,86 +1466,22 @@ public void pngPrintMutableGraph(String filePath, GraphType graphType) graph.generateImage(filePath); } - private List toFileInfoList(List sstFiles, - ManagedOptions options, - ManagedReadOptions readOptions - ) { + private Map toFileInfoList(List sstFiles, RocksDB db) { if (CollectionUtils.isEmpty(sstFiles)) { - return Collections.emptyList(); + return Collections.emptyMap(); } - - List response = new ArrayList<>(); - + Map liveFileMetaDataMap = ManagedRocksDB.getLiveMetadataForSSTFiles(db); + Map response = new HashMap<>(); for (String sstFile : sstFiles) { - CompactionFileInfo fileInfo = toFileInfo(sstFile, options, readOptions); - response.add(fileInfo); + String fileName = FilenameUtils.getBaseName(sstFile); + CompactionFileInfo fileInfo = + new CompactionFileInfo.Builder(fileName).setValues(liveFileMetaDataMap.get(fileName)).build(); + response.put(sstFile, fileInfo); } return response; } - private CompactionFileInfo toFileInfo(String sstFile, - ManagedOptions options, - ManagedReadOptions readOptions) { - final int fileNameOffset = sstFile.lastIndexOf("/") + 1; - String fileName = sstFile.substring(fileNameOffset, - sstFile.length() - SST_FILE_EXTENSION_LENGTH); - CompactionFileInfo.Builder fileInfoBuilder = - new CompactionFileInfo.Builder(fileName); - - try (ManagedSstFileReader fileReader = new ManagedSstFileReader(options)) { - fileReader.open(sstFile); - String columnFamily = StringUtils.bytes2String(fileReader.getTableProperties().getColumnFamilyName()); - try (ManagedSstFileReaderIterator iterator = - ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions))) { - iterator.get().seekToFirst(); - String startKey = StringUtils.bytes2String(iterator.get().key()); - iterator.get().seekToLast(); - String endKey = StringUtils.bytes2String(iterator.get().key()); - fileInfoBuilder.setStartRange(startKey) - .setEndRange(endKey) - .setColumnFamily(columnFamily); - } - } catch (RocksDBException rocksDBException) { - // Ideally it should not happen. If it does just log the exception. - // And let the compaction complete without the exception. - // Throwing exception in compaction listener could fail the RocksDB. - // In case of exception, compaction node will be missing start key, - // end key and column family. And during diff calculation it will - // continue the traversal as it was before HDDS-8940. - LOG.warn("Failed to read SST file: {}.", sstFile, rocksDBException); - } - return fileInfoBuilder.build(); - } - - @VisibleForTesting - boolean shouldSkipNode(CompactionNode node, - Map columnFamilyToPrefixMap) { - // This is for backward compatibility. 
Before the compaction log table - // migration, startKey, endKey and columnFamily information is not persisted - // in compaction log files. - // Also for the scenario when there is an exception in reading SST files - // for the file node. - if (node.getStartKey() == null || node.getEndKey() == null || - node.getColumnFamily() == null) { - LOG.debug("Compaction node with fileName: {} doesn't have startKey, " + - "endKey and columnFamily details.", node.getFileName()); - return false; - } - - if (MapUtils.isEmpty(columnFamilyToPrefixMap)) { - LOG.debug("Provided columnFamilyToPrefixMap is null or empty."); - return false; - } - - if (!columnFamilyToPrefixMap.containsKey(node.getColumnFamily())) { - LOG.debug("SstFile node: {} is for columnFamily: {} while filter map " + - "contains columnFamilies: {}.", node.getFileName(), - node.getColumnFamily(), columnFamilyToPrefixMap.keySet()); - return true; - } - - String keyPrefix = columnFamilyToPrefixMap.get(node.getColumnFamily()); - return !RocksDiffUtils.isKeyWithPrefixPresent(keyPrefix, node.getStartKey(), - node.getEndKey()); + ConcurrentMap getInflightCompactions() { + return inflightCompactions; } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java index e116868410f..6f044e165a0 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java @@ -17,22 +17,22 @@ */ package org.apache.ozone.rocksdiff; +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; -import org.rocksdb.TableProperties; -import org.rocksdb.RocksDBException; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.ozone.compaction.log.CompactionFileInfo; +import org.rocksdb.LiveFileMetaData; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Set; -import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** @@ -73,44 +73,68 @@ public static String constructBucketKey(String keyName) { } public static void filterRelevantSstFiles(Set inputFiles, - Map tableToPrefixMap) throws IOException { + Map tableToPrefixMap, + ManagedRocksDB... dbs) { + filterRelevantSstFiles(inputFiles, tableToPrefixMap, Collections.emptyMap(), dbs); + } + + /** + * Filter sst files based on prefixes. + */ + public static void filterRelevantSstFiles(Set inputFiles, + Map tableToPrefixMap, + Map preExistingCompactionNodes, + ManagedRocksDB... 
dbs) { + Map liveFileMetaDataMap = new HashMap<>(); + int dbIdx = 0; for (Iterator fileIterator = inputFiles.iterator(); fileIterator.hasNext();) { - String filepath = fileIterator.next(); - if (!RocksDiffUtils.doesSstFileContainKeyRange(filepath, - tableToPrefixMap)) { + String filename = FilenameUtils.getBaseName(fileIterator.next()); + while (!preExistingCompactionNodes.containsKey(filename) && !liveFileMetaDataMap.containsKey(filename) + && dbIdx < dbs.length) { + liveFileMetaDataMap.putAll(dbs[dbIdx].getLiveMetadataForSSTFiles()); + dbIdx += 1; + } + CompactionNode compactionNode = preExistingCompactionNodes.get(filename); + if (compactionNode == null) { + compactionNode = new CompactionNode(new CompactionFileInfo.Builder(filename) + .setValues(liveFileMetaDataMap.get(filename)).build()); + } + if (shouldSkipNode(compactionNode, tableToPrefixMap)) { fileIterator.remove(); } } } - public static boolean doesSstFileContainKeyRange(String filepath, - Map tableToPrefixMap) throws IOException { - - try ( - ManagedOptions options = new ManagedOptions(); - ManagedSstFileReader sstFileReader = new ManagedSstFileReader(options)) { - sstFileReader.open(filepath); - TableProperties properties = sstFileReader.getTableProperties(); - String tableName = new String(properties.getColumnFamilyName(), UTF_8); - if (tableToPrefixMap.containsKey(tableName)) { - String prefix = tableToPrefixMap.get(tableName); + @VisibleForTesting + static boolean shouldSkipNode(CompactionNode node, + Map columnFamilyToPrefixMap) { + // This is for backward compatibility. Before the compaction log table + // migration, startKey, endKey and columnFamily information is not persisted + // in compaction log files. + // Also for the scenario when there is an exception in reading SST files + // for the file node. 
+ if (node.getStartKey() == null || node.getEndKey() == null || + node.getColumnFamily() == null) { + LOG.debug("Compaction node with fileName: {} doesn't have startKey, " + + "endKey and columnFamily details.", node.getFileName()); + return false; + } - try ( - ManagedReadOptions readOptions = new ManagedReadOptions(); - ManagedSstFileReaderIterator iterator = ManagedSstFileReaderIterator.managed( - sstFileReader.newIterator(readOptions))) { - iterator.get().seek(prefix.getBytes(UTF_8)); - String seekResultKey = new String(iterator.get().key(), UTF_8); - return seekResultKey.startsWith(prefix); - } - } + if (MapUtils.isEmpty(columnFamilyToPrefixMap)) { + LOG.debug("Provided columnFamilyToPrefixMap is null or empty."); return false; - } catch (RocksDBException e) { - LOG.error("Failed to read SST File ", e); - throw new IOException(e); } - } + if (!columnFamilyToPrefixMap.containsKey(node.getColumnFamily())) { + LOG.debug("SstFile node: {} is for columnFamily: {} while filter map " + + "contains columnFamilies: {}.", node.getFileName(), + node.getColumnFamily(), columnFamilyToPrefixMap.keySet()); + return true; + } + String keyPrefix = columnFamilyToPrefixMap.get(node.getColumnFamily()); + return !isKeyWithPrefixPresent(keyPrefix, node.getStartKey(), + node.getEndKey()); + } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index 0164e3a23bd..4f04abb8b5b 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -21,6 +21,7 @@ import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MINUTES; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.graph.GraphBuilder; import java.io.File; @@ -38,6 +39,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -48,10 +50,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Consumer; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; import com.google.common.graph.MutableGraph; +import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -67,14 +71,18 @@ import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; +import org.apache.ozone.rocksdb.util.RdbUtil; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.NodeComparator; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import 
org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.LiveFileMetaData; @@ -101,6 +109,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -231,6 +241,29 @@ public void cleanUp() { } } + private static List getPrunedCompactionEntries(boolean prune, Map metadata) { + List entries = new ArrayList<>(); + if (!prune) { + entries.add(createCompactionEntry(1, + now(), + Arrays.asList("1", "2"), + Arrays.asList("4", "5"), metadata)); + } + entries.addAll(Arrays.asList(createCompactionEntry(2, + now(), + Arrays.asList("4", "5"), + Collections.singletonList("10"), metadata), + createCompactionEntry(3, + now(), + Arrays.asList("3", "13", "14"), + Arrays.asList("6", "7"), metadata), + createCompactionEntry(4, + now(), + Arrays.asList("6", "7"), + Collections.singletonList("11"), metadata))); + return entries; + } + /** * Test cases for testGetSSTDiffListWithoutDB. */ @@ -306,13 +339,19 @@ private static Stream casesGetSSTDiffListWithoutDB() { ); DifferSnapshotInfo snapshotInfo1 = new DifferSnapshotInfo( - "/path/to/dbcp1", UUID.randomUUID(), 3008L, null, null); + "/path/to/dbcp1", UUID.randomUUID(), 3008L, null, Mockito.mock(ManagedRocksDB.class)); DifferSnapshotInfo snapshotInfo2 = new DifferSnapshotInfo( - "/path/to/dbcp2", UUID.randomUUID(), 14980L, null, null); + "/path/to/dbcp2", UUID.randomUUID(), 14980L, null, Mockito.mock(ManagedRocksDB.class)); DifferSnapshotInfo snapshotInfo3 = new DifferSnapshotInfo( - "/path/to/dbcp3", UUID.randomUUID(), 17975L, null, null); + "/path/to/dbcp3", UUID.randomUUID(), 17975L, null, Mockito.mock(ManagedRocksDB.class)); DifferSnapshotInfo snapshotInfo4 = new DifferSnapshotInfo( - "/path/to/dbcp4", UUID.randomUUID(), 18000L, null, null); + "/path/to/dbcp4", UUID.randomUUID(), 18000L, null, Mockito.mock(ManagedRocksDB.class)); + + Map prefixMap = ImmutableMap.of("col1", "c", "col2", "d"); + DifferSnapshotInfo snapshotInfo5 = new DifferSnapshotInfo( + "/path/to/dbcp2", UUID.randomUUID(), 0L, prefixMap, Mockito.mock(ManagedRocksDB.class)); + DifferSnapshotInfo snapshotInfo6 = new DifferSnapshotInfo( + "/path/to/dbcp2", UUID.randomUUID(), 100L, prefixMap, Mockito.mock(ManagedRocksDB.class)); Set snapshotSstFiles1 = ImmutableSet.of("000059", "000053"); Set snapshotSstFiles2 = ImmutableSet.of("000088", "000059", @@ -342,7 +381,9 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000105", "000080", "000087", "000073", "000095"), - false), + ImmutableSet.of("000066", "000105", "000080", "000087", "000073", + "000095"), + false, Collections.emptyMap()), Arguments.of("Test 2: Compaction log file crafted input: " + "One source ('to' snapshot) SST file is never compacted " + "(newly flushed)", @@ -354,7 +395,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles3, ImmutableSet.of("000088", "000105", "000059", "000053", "000095"), ImmutableSet.of("000108"), - false), + ImmutableSet.of("000108"), + false, Collections.emptyMap()), Arguments.of("Test 3: Compaction log file crafted input: " + "Same SST files found during SST expansion", compactionLog, @@ -365,7 +407,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { 
snapshotSstFiles1Alt1, ImmutableSet.of("000066", "000059", "000053"), ImmutableSet.of("000080", "000087", "000073", "000095"), - false), + ImmutableSet.of("000080", "000087", "000073", "000095"), + false, Collections.emptyMap()), Arguments.of("Test 4: Compaction log file crafted input: " + "Skipping known processed SST.", compactionLog, @@ -376,7 +419,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1Alt2, Collections.emptySet(), Collections.emptySet(), - true), + Collections.emptySet(), + true, Collections.emptyMap()), Arguments.of("Test 5: Compaction log file hit snapshot" + " generation early exit condition", compactionLog, @@ -387,7 +431,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1, ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), - false), + ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), + false, Collections.emptyMap()), Arguments.of("Test 6: Compaction log table regular case. " + "Expands expandable SSTs in the initial diff.", null, @@ -399,7 +444,9 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000105", "000080", "000087", "000073", "000095"), - false), + ImmutableSet.of("000066", "000105", "000080", "000087", "000073", + "000095"), + false, Collections.emptyMap()), Arguments.of("Test 7: Compaction log table crafted input: " + "One source ('to' snapshot) SST file is never compacted " + "(newly flushed)", @@ -411,7 +458,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles3, ImmutableSet.of("000088", "000105", "000059", "000053", "000095"), ImmutableSet.of("000108"), - false), + ImmutableSet.of("000108"), + false, Collections.emptyMap()), Arguments.of("Test 8: Compaction log table crafted input: " + "Same SST files found during SST expansion", null, @@ -422,7 +470,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1Alt1, ImmutableSet.of("000066", "000059", "000053"), ImmutableSet.of("000080", "000087", "000073", "000095"), - false), + ImmutableSet.of("000080", "000087", "000073", "000095"), + false, Collections.emptyMap()), Arguments.of("Test 9: Compaction log table crafted input: " + "Skipping known processed SST.", null, @@ -433,7 +482,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1Alt2, Collections.emptySet(), Collections.emptySet(), - true), + Collections.emptySet(), + true, Collections.emptyMap()), Arguments.of("Test 10: Compaction log table hit snapshot " + "generation early exit condition", null, @@ -444,7 +494,64 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1, ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), - false) + ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), + false, Collections.emptyMap()), + Arguments.of("Test 11: Older Compaction log got pruned and source snapshot delta files would be " + + "unreachable", + null, + getPrunedCompactionEntries(false, Collections.emptyMap()), + snapshotInfo6, + snapshotInfo5, + ImmutableSet.of("10", "11", "8", "9", "12"), + ImmutableSet.of("1", "3", "13", "14"), + ImmutableSet.of("1", "3", "13", "14"), + ImmutableSet.of("2", "8", "9", "12"), + ImmutableSet.of("2", "8", "9", "12"), + false, Collections.emptyMap()), + Arguments.of("Test 12: Older Compaction log got pruned and source snapshot delta files would be " + + "unreachable", + null, + 
getPrunedCompactionEntries(true, Collections.emptyMap()), + snapshotInfo6, + snapshotInfo5, + ImmutableSet.of("10", "11", "8", "9", "12"), + ImmutableSet.of("1", "3", "13", "14"), + ImmutableSet.of("3", "13", "14"), + ImmutableSet.of("4", "5", "8", "9", "12"), + null, + false, Collections.emptyMap()), + Arguments.of("Test 13: Compaction log to test filtering logic based on range and column family", + null, + getPrunedCompactionEntries(false, + new HashMap() {{ + put("1", new String[]{"a", "c", "col1"}); + put("3", new String[]{"a", "d", "col2"}); + put("13", new String[]{"a", "c", "col13"}); + put("14", new String[]{"a", "c", "col1"}); + put("2", new String[]{"a", "c", "col1"}); + put("4", new String[]{"a", "b", "col1"}); + put("5", new String[]{"b", "b", "col1"}); + put("10", new String[]{"a", "b", "col1"}); + put("8", new String[]{"a", "b", "col1"}); + put("6", new String[]{"a", "z", "col13"}); + put("7", new String[]{"a", "z", "col13"}); + }}), + snapshotInfo6, + snapshotInfo5, + ImmutableSet.of("10", "11", "8", "9", "12", "15"), + ImmutableSet.of("1", "3", "13", "14"), + ImmutableSet.of("1", "13", "3", "14"), + ImmutableSet.of("2", "8", "9", "12", "15"), + ImmutableSet.of("2", "9", "12"), + false, + ImmutableMap.of( + "2", new String[]{"a", "b", "col1"}, + "12", new String[]{"a", "d", "col2"}, + "8", new String[]{"a", "b", "col1"}, + "9", new String[]{"a", "c", "col1"}, + "15", new String[]{"a", "z", "col13"} + )) + ); } @@ -464,48 +571,94 @@ public void testGetSSTDiffListWithoutDB(String description, Set destSnapshotSstFiles, Set expectedSameSstFiles, Set expectedDiffSstFiles, - boolean expectingException) { - - boolean exceptionThrown = false; - - if (compactionLog != null) { - // Construct DAG from compaction log input - Arrays.stream(compactionLog.split("\n")).forEach( - rocksDBCheckpointDiffer::processCompactionLogLine); - } else if (compactionLogEntries != null) { - compactionLogEntries.forEach(entry -> - rocksDBCheckpointDiffer.addToCompactionLogTable(entry)); - } else { - throw new IllegalArgumentException("One of compactionLog and " + - "compactionLogEntries should be non-null."); - } - rocksDBCheckpointDiffer.loadAllCompactionLogs(); - - Set actualSameSstFiles = new HashSet<>(); - Set actualDiffSstFiles = new HashSet<>(); - - try { - rocksDBCheckpointDiffer.internalGetSSTDiffList( - srcSnapshot, - destSnapshot, - srcSnapshotSstFiles, - destSnapshotSstFiles, - actualSameSstFiles, - actualDiffSstFiles); - } catch (RuntimeException rtEx) { - if (!expectingException) { - fail("Unexpected exception thrown in test."); + Set expectedSSTDiffFiles, + boolean expectingException, + Map metaDataMap) { + try (MockedStatic mockedRocksdiffUtil = Mockito.mockStatic(RocksDiffUtils.class, + Mockito.CALLS_REAL_METHODS)) { + mockedRocksdiffUtil.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); + boolean exceptionThrown = false; + if (compactionLog != null) { + // Construct DAG from compaction log input + Arrays.stream(compactionLog.split("\n")).forEach( + rocksDBCheckpointDiffer::processCompactionLogLine); + } else if (compactionLogEntries != null) { + compactionLogEntries.forEach(entry -> + rocksDBCheckpointDiffer.addToCompactionLogTable(entry)); } else { - exceptionThrown = true; + throw new IllegalArgumentException("One of compactionLog and " + + "compactionLogEntries should be non-null."); + } + rocksDBCheckpointDiffer.loadAllCompactionLogs(); + + Set actualSameSstFiles = new HashSet<>(); + Set actualDiffSstFiles = new HashSet<>(); + + try { + 
rocksDBCheckpointDiffer.internalGetSSTDiffList( + srcSnapshot, + destSnapshot, + srcSnapshotSstFiles, + destSnapshotSstFiles, + actualSameSstFiles, + actualDiffSstFiles); + } catch (RuntimeException rtEx) { + if (!expectingException) { + fail("Unexpected exception thrown in test."); + } else { + exceptionThrown = true; + } } - } - // Check same and different SST files result - assertEquals(expectedSameSstFiles, actualSameSstFiles); - assertEquals(expectedDiffSstFiles, actualDiffSstFiles); + if (expectingException && !exceptionThrown) { + fail("Expecting exception but none thrown."); + } - if (expectingException && !exceptionThrown) { - fail("Expecting exception but none thrown."); + // Check same and different SST files result + assertEquals(expectedSameSstFiles, actualSameSstFiles); + assertEquals(expectedDiffSstFiles, actualDiffSstFiles); + try (MockedStatic mockedHandler = Mockito.mockStatic(RdbUtil.class, Mockito.CALLS_REAL_METHODS)) { + RocksDB rocksDB = Mockito.mock(RocksDB.class); + Mockito.when(rocksDB.getName()).thenReturn("dummy"); + Mockito.when(srcSnapshot.getRocksDB().get()).thenReturn(rocksDB); + Mockito.when(destSnapshot.getRocksDB().get()).thenReturn(rocksDB); + Mockito.when(srcSnapshot.getRocksDB().getLiveMetadataForSSTFiles()) + .thenAnswer(invocation -> srcSnapshotSstFiles.stream().filter(metaDataMap::containsKey).map(file -> { + LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); + String[] metaData = metaDataMap.get(file); + Mockito.when(liveFileMetaData.fileName()).thenReturn("/" + file + SST_FILE_EXTENSION); + Mockito.when(liveFileMetaData.smallestKey()).thenReturn(metaData[0].getBytes(UTF_8)); + Mockito.when(liveFileMetaData.largestKey()).thenReturn(metaData[1].getBytes(UTF_8)); + Mockito.when(liveFileMetaData.columnFamilyName()).thenReturn(metaData[2].getBytes(UTF_8)); + return liveFileMetaData; + }).collect(Collectors.toMap(liveFileMetaData -> FilenameUtils.getBaseName(liveFileMetaData.fileName()), + Function.identity()))); + mockedHandler.when(() -> RdbUtil.getLiveSSTFilesForCFs(any(), any())) + .thenAnswer(i -> { + Set sstFiles = i.getArgument(0).equals(srcSnapshot.getRocksDB()) ? 
srcSnapshotSstFiles + : destSnapshotSstFiles; + return sstFiles.stream().map(fileName -> { + LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); + Mockito.when(liveFileMetaData.fileName()).thenReturn("/" + fileName + SST_FILE_EXTENSION); + return liveFileMetaData; + }).collect(Collectors.toList()); + }); + try { + Assertions.assertEquals(Optional.ofNullable(expectedSSTDiffFiles) + .map(files -> files.stream().sorted().collect(Collectors.toList())).orElse(null), + rocksDBCheckpointDiffer.getSSTDiffList(srcSnapshot, destSnapshot) + .map(i -> i.stream().sorted().collect(Collectors.toList())).orElse(null)); + } catch (RuntimeException rtEx) { + if (!expectingException) { + fail("Unexpected exception thrown in test."); + } else { + exceptionThrown = true; + } + } + } + if (expectingException && !exceptionThrown) { + fail("Expecting exception but none thrown."); + } } } @@ -539,7 +692,12 @@ void testDifferWithDB() throws Exception { "000017.sst", "000019.sst", "000021.sst", "000023.sst", "000024.sst", "000026.sst", "000029.sst")); } - + rocksDBCheckpointDiffer.getForwardCompactionDAG().nodes().stream().forEach(compactionNode -> { + Assertions.assertNotNull(compactionNode.getStartKey()); + Assertions.assertNotNull(compactionNode.getEndKey()); + }); + GenericTestUtils.waitFor(() -> rocksDBCheckpointDiffer.getInflightCompactions().isEmpty(), 1000, + 10000); if (LOG.isDebugEnabled()) { rocksDBCheckpointDiffer.dumpCompactionNodeTable(); } @@ -587,7 +745,7 @@ void diffAllSnapshots(RocksDBCheckpointDiffer differ) int index = 0; for (DifferSnapshotInfo snap : snapshots) { // Returns a list of SST files to be fed into RocksDiff - List sstDiffList = differ.getSSTDiffList(src, snap); + List sstDiffList = differ.getSSTDiffList(src, snap).orElse(Collections.emptyList()); LOG.info("SST diff list from '{}' to '{}': {}", src.getDbPath(), snap.getDbPath(), sstDiffList); @@ -1452,19 +1610,30 @@ private static Stream sstFilePruningScenarios() { ); } - private static CompactionLogEntry createCompactionEntry( - long dbSequenceNumber, - long compactionTime, - List inputFiles, - List outputFiles - ) { + private static CompactionLogEntry createCompactionEntry(long dbSequenceNumber, + long compactionTime, + List inputFiles, + List outputFiles) { + return createCompactionEntry(dbSequenceNumber, compactionTime, inputFiles, outputFiles, Collections.emptyMap()); + } + + private static CompactionLogEntry createCompactionEntry(long dbSequenceNumber, + long compactionTime, + List inputFiles, + List outputFiles, + Map metadata) { return new CompactionLogEntry.Builder(dbSequenceNumber, compactionTime, - toFileInfoList(inputFiles), toFileInfoList(outputFiles)).build(); + toFileInfoList(inputFiles, metadata), toFileInfoList(outputFiles, metadata)).build(); } - private static List toFileInfoList(List files) { + private static List toFileInfoList(List files, + Map metadata) { return files.stream() - .map(fileName -> new CompactionFileInfo.Builder(fileName).build()) + .map(fileName -> new CompactionFileInfo.Builder(fileName) + .setStartRange(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[0]).orElse(null)) + .setEndRange(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[1]).orElse(null)) + .setColumnFamily(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[2]).orElse(null)) + .build()) .collect(Collectors.toList()); } @@ -1794,7 +1963,7 @@ public void testShouldSkipNode(Map columnFamilyToPrefixMap, .getCompactionNodeMap().values().stream() 
.sorted(Comparator.comparing(CompactionNode::getFileName)) .map(node -> - rocksDBCheckpointDiffer.shouldSkipNode(node, + RocksDiffUtils.shouldSkipNode(node, columnFamilyToPrefixMap)) .collect(Collectors.toList()); @@ -1831,7 +2000,7 @@ public void testShouldSkipNodeEdgeCase( rocksDBCheckpointDiffer.loadAllCompactionLogs(); - assertEquals(expectedResponse, rocksDBCheckpointDiffer.shouldSkipNode(node, + assertEquals(expectedResponse, RocksDiffUtils.shouldSkipNode(node, columnFamilyToPrefixMap)); } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java index 67233676f0b..ef92aa2c17c 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java @@ -18,10 +18,32 @@ package org.apache.ozone.rocksdiff; +import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.assertj.core.util.Sets; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.rocksdb.LiveFileMetaData; +import org.rocksdb.RocksDB; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.ArgumentMatchers.anyString; /** * Class to test RocksDiffUtils. 
@@ -54,4 +76,103 @@ public void testFilterFunction() { "/volume/bucket/key-1", "/volume/bucket2/key-97")); } + + public static Stream values() { + return Stream.of( + arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "b", "f"), + arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "e", "f"), + arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "a", "f"), + arguments("validColumnFamily", "validColumnFamily", "a", "d", "e", "g"), + arguments("validColumnFamily", "validColumnFamily", "e", "g", "a", "d"), + arguments("validColumnFamily", "validColumnFamily", "b", "b", "e", "g"), + arguments("validColumnFamily", "validColumnFamily", "a", "d", "e", "e") + ); + } + + @ParameterizedTest + @MethodSource("values") + public void testFilterRelevantSstFilesWithPreExistingCompactionInfo(String validSSTColumnFamilyName, + String invalidColumnFamilyName, + String validSSTFileStartRange, + String validSSTFileEndRange, + String invalidSSTFileStartRange, + String invalidSSTFileEndRange) { + try (MockedStatic mockedHandler = Mockito.mockStatic(RocksDiffUtils.class, + Mockito.CALLS_REAL_METHODS)) { + mockedHandler.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); + String validSstFile = "filePath/validSSTFile.sst"; + String invalidSstFile = "filePath/invalidSSTFile.sst"; + String untrackedSstFile = "filePath/untrackedSSTFile.sst"; + String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + + validSSTFileStartRange.charAt(0)) / 2)); + Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); + RocksDiffUtils.filterRelevantSstFiles(sstFile, ImmutableMap.of(validSSTColumnFamilyName, expectedPrefix), + ImmutableMap.of("validSSTFile", new CompactionNode(validSstFile, 0, 0, validSSTFileStartRange, + validSSTFileEndRange, validSSTColumnFamilyName), "invalidSSTFile", + new CompactionNode(invalidSstFile, 0, 0, invalidSSTFileStartRange, + invalidSSTFileEndRange, invalidColumnFamilyName))); + Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile); + } + + } + + private LiveFileMetaData getMockedLiveFileMetadata(String columnFamilyName, String startRange, + String endRange, + String name) { + LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); + Mockito.when(liveFileMetaData.largestKey()).thenReturn(endRange.getBytes(StandardCharsets.UTF_8)); + Mockito.when(liveFileMetaData.columnFamilyName()).thenReturn(columnFamilyName.getBytes(StandardCharsets.UTF_8)); + Mockito.when(liveFileMetaData.smallestKey()).thenReturn(startRange.getBytes(StandardCharsets.UTF_8)); + Mockito.when(liveFileMetaData.fileName()).thenReturn("basePath/" + name + ".sst"); + return liveFileMetaData; + } + + @ParameterizedTest + @MethodSource("values") + public void testFilterRelevantSstFilesFromDB(String validSSTColumnFamilyName, + String invalidColumnFamilyName, + String validSSTFileStartRange, + String validSSTFileEndRange, + String invalidSSTFileStartRange, + String invalidSSTFileEndRange) { + try (MockedStatic mockedHandler = Mockito.mockStatic(RocksDiffUtils.class, + Mockito.CALLS_REAL_METHODS)) { + mockedHandler.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); + for (int numberOfDBs = 1; numberOfDBs < 10; numberOfDBs++) { + String validSstFile = "filePath/validSSTFile.sst"; + String invalidSstFile = "filePath/invalidSSTFile.sst"; + String untrackedSstFile = "filePath/untrackedSSTFile.sst"; + int expectedDBKeyIndex = numberOfDBs 
/ 2; + ManagedRocksDB[] rocksDBs = + IntStream.range(0, numberOfDBs).mapToObj(i -> Mockito.mock(ManagedRocksDB.class)) + .collect(Collectors.toList()).toArray(new ManagedRocksDB[numberOfDBs]); + for (int i = 0; i < numberOfDBs; i++) { + ManagedRocksDB managedRocksDB = rocksDBs[i]; + RocksDB mockedRocksDB = Mockito.mock(RocksDB.class); + Mockito.when(managedRocksDB.get()).thenReturn(mockedRocksDB); + if (i == expectedDBKeyIndex) { + LiveFileMetaData validLiveFileMetaData = getMockedLiveFileMetadata(validSSTColumnFamilyName, + validSSTFileStartRange, validSSTFileEndRange, "validSSTFile"); + LiveFileMetaData invalidLiveFileMetaData = getMockedLiveFileMetadata(invalidColumnFamilyName, + invalidSSTFileStartRange, invalidSSTFileEndRange, "invalidSSTFile"); + List liveFileMetaDatas = Arrays.asList(validLiveFileMetaData, invalidLiveFileMetaData); + Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(liveFileMetaDatas); + } else { + Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(Collections.emptyList()); + } + Mockito.when(managedRocksDB.getLiveMetadataForSSTFiles()) + .thenAnswer(invocation -> ManagedRocksDB.getLiveMetadataForSSTFiles(mockedRocksDB)); + } + + String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + + validSSTFileStartRange.charAt(0)) / 2)); + Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); + RocksDiffUtils.filterRelevantSstFiles(sstFile, ImmutableMap.of(validSSTColumnFamilyName, expectedPrefix), + Collections.emptyMap(), rocksDBs); + Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile); + } + + } + + } } diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 32408e8904b..4c2e40c3759 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -20,15 +20,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-server-scm - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Storage Container Manager Server Apache Ozone HDDS SCM Server jar + false diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java index a792e2cea6b..05eb32722e7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java @@ -73,7 +73,7 @@ ContainerPlacementStatus validateContainerPlacement( * Given a set of replicas of a container which are * neither over underreplicated nor overreplicated, * return a set of replicas to copy to another node to fix misreplication. - * @param replicas: Map of replicas with value signifying if + * @param replicas Map of replicas with value signifying if * replica can be copied */ Set replicasToCopyToFixMisreplication( @@ -82,8 +82,8 @@ Set replicasToCopyToFixMisreplication( /** * Given a set of replicas of a container which are overreplicated, * return a set of replicas to delete to fix overreplication. 
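The two TestRocksDiffUtils cases above encode the relevance rule behind RocksDiffUtils.filterRelevantSstFiles: keep an SST file when nothing is known about it, drop it when its tracked column family or key range rules it out. Below is a minimal standalone sketch of that rule only, using a hypothetical SstFileRange holder instead of the production CompactionNode/LiveFileMetaData plumbing:

import java.util.Map;
import java.util.Set;

final class SstRangeFilterSketch {

  /** Hypothetical holder for the per-file metadata the check needs. */
  static final class SstFileRange {
    final String columnFamily;
    final String smallestKey;
    final String largestKey;

    SstFileRange(String columnFamily, String smallestKey, String largestKey) {
      this.columnFamily = columnFamily;
      this.smallestKey = smallestKey;
      this.largestKey = largestKey;
    }
  }

  /**
   * Removes entries from {@code sstFiles} that can be proven irrelevant.
   * @param sstFiles              candidate SST file names (mutated in place)
   * @param prefixPerColumnFamily column family of interest -> bucket prefix
   * @param trackedRanges         file name -> known key range, if any
   */
  static void filterRelevant(Set<String> sstFiles,
      Map<String, String> prefixPerColumnFamily,
      Map<String, SstFileRange> trackedRanges) {
    sstFiles.removeIf(file -> {
      SstFileRange range = trackedRanges.get(file);
      if (range == null) {
        return false;            // nothing known about the file: keep it
      }
      String prefix = prefixPerColumnFamily.get(range.columnFamily);
      if (prefix == null) {
        return true;             // column family is not of interest: drop it
      }
      // Drop the file when the prefix lies outside [smallestKey, largestKey].
      return prefix.compareTo(range.smallestKey) < 0
          || prefix.compareTo(range.largestKey) > 0;
    });
  }
}

In the parameterized tests the prefix is the midpoint character of the valid file's key range, so the valid and untracked files survive the filter while the invalid file is pruned, which is exactly what the assertions check.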
- * @param replicas: Set of existing replicas of the container - * @param expectedCountPerUniqueReplica: Replication factor of each + * @param replicas Set of existing replicas of the container + * @param expectedCountPerUniqueReplica Replication factor of each * unique replica */ Set replicasToRemoveToFixOverreplication( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 471a9479412..2a1c6fce0c0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -525,7 +525,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, * Given a set of replicas of a container which are * neither over underreplicated nor overreplicated, * return a set of replicas to copy to another node to fix misreplication. - * @param replicas: Map of replicas with value signifying if + * @param replicas Map of replicas with value signifying if * replica can be copied */ @Override @@ -582,7 +582,7 @@ protected Node getPlacementGroup(DatanodeDetails dn) { * replication is computed. * The algorithm starts with creating a replicaIdMap which contains the * replicas grouped by replica Index. A placementGroup Map is created which - * groups replicas based on their rack & the replicas within the rack + * groups replicas based on their rack and the replicas within the rack * are further grouped based on the replica Index. * A placement Group Count Map is created which keeps * track of the count of replicas in each rack. @@ -590,13 +590,13 @@ protected Node getPlacementGroup(DatanodeDetails dn) { * order based on their current replication factor in a descending factor. * For each replica Index the replica is removed from the rack which contains * the most replicas, in order to achieve this the racks are put - * into priority queue & are based on the number of replicas they have. - * The replica is removed from the rack with maximum replicas & the replica - * to be removed is also removed from the maps created above & + * into priority queue and are based on the number of replicas they have. + * The replica is removed from the rack with maximum replicas and the replica + * to be removed is also removed from the maps created above and * the count for rack is reduced. * The set of replicas computed are then returned by the function. - * @param replicas: Set of existing replicas of the container - * @param expectedCountPerUniqueReplica: Replication factor of each + * @param replicas Set of existing replicas of the container + * @param expectedCountPerUniqueReplica Replication factor of each * * unique replica * @return Set of replicas to be removed are computed. 
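The javadoc above describes the rack-aware selection for fixing overreplication in prose: group replicas by rack, then repeatedly remove a replica from the rack holding the most copies, tracked with a priority queue. A minimal sketch of just that max-heap idea, with hypothetical types; it deliberately ignores replica indexes and the expectedCountPerUniqueReplica handling of the real SCMCommonPlacementPolicy:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;

final class OverReplicationPrunerSketch {

  /** Pick {@code excess} replicas to delete, spreading removals across racks. */
  static <R> List<R> replicasToRemove(Map<R, String> replicaToRack, int excess) {
    Map<String, List<R>> byRack = new HashMap<>();
    replicaToRack.forEach((replica, rack) ->
        byRack.computeIfAbsent(rack, r -> new ArrayList<>()).add(replica));

    // Max-heap of racks, ordered by how many replicas each still holds.
    PriorityQueue<String> racks = new PriorityQueue<>(
        Comparator.comparingInt((String rack) -> byRack.get(rack).size()).reversed());
    racks.addAll(byRack.keySet());

    List<R> toRemove = new ArrayList<>();
    while (toRemove.size() < excess && !racks.isEmpty()) {
      String fullest = racks.poll();
      List<R> replicas = byRack.get(fullest);
      toRemove.add(replicas.remove(replicas.size() - 1));
      if (!replicas.isEmpty()) {
        racks.offer(fullest);    // re-insert so its reduced count is re-evaluated
      }
    }
    return toRemove;
  }
}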
*/ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java index e485fcc98d9..99fd9c7b431 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java @@ -47,7 +47,8 @@ void addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) { blocksDeleted += tx.getLocalIDCount(); if (SCMBlockDeletingService.LOG.isDebugEnabled()) { SCMBlockDeletingService.LOG - .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID()); + .debug("Transaction added: {} <- TX({}), DN {} <- blocksDeleted Add {}.", + dnID, tx.getTxID(), dnID, tx.getLocalIDCount()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java index 45d53c0ef2c..5ec68c78d74 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java @@ -57,7 +57,7 @@ DatanodeDeletedBlockTransactions getTransactions( * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if < 0. return all. + * @param count Maximum num of returned transactions, if < 0. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. * @throws IOException diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 9d5377b9e3e..a83ce085dd8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -94,6 +93,7 @@ public class DeletedBlockLogImpl private long scmCommandTimeoutMs = Duration.ofSeconds(300).toMillis(); private static final int LIST_ALL_FAILED_TRANSACTIONS = -1; + private long lastProcessedTransactionId = -1; public DeletedBlockLogImpl(ConfigurationSource conf, StorageContainerManager scm, @@ -138,6 +138,7 @@ public List getFailedTransactions(int count, } } } else { + iter.seek(startTxId); while (iter.hasNext() && failedTXs.size() < count) { DeletedBlocksTransaction delTX = iter.next().getValue(); if (delTX.getCount() == -1 && delTX.getTxID() >= startTxId) { @@ -200,20 +201,6 @@ private 
DeletedBlocksTransaction constructNewTransaction( .build(); } - private boolean isTransactionFailed(DeleteBlockTransactionResult result) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Got block deletion ACK from datanode, TXIDs={}, " + "success={}", - result.getTxID(), result.getSuccess()); - } - if (!result.getSuccess()) { - LOG.warn("Got failed ACK for TXID={}, prepare to resend the " - + "TX in next interval", result.getTxID()); - return true; - } - return false; - } - @Override public int getNumOfValidTransactions() throws IOException { lock.lock(); @@ -300,26 +287,46 @@ private void getTransaction(DeletedBlocksTransaction tx, .setCount(transactionStatusManager.getOrDefaultRetryCount( tx.getTxID(), 0)) .build(); + for (ContainerReplica replica : replicas) { DatanodeDetails details = replica.getDatanodeDetails(); - if (!dnList.contains(details)) { - continue; - } if (!transactionStatusManager.isDuplication( details, updatedTxn.getTxID(), commandStatus)) { transactions.addTransactionToDN(details.getUuid(), updatedTxn); + metrics.incrProcessedTransaction(); } } } private Boolean checkInadequateReplica(Set replicas, - DeletedBlocksTransaction txn) throws ContainerNotFoundException { + DeletedBlocksTransaction txn, + Set dnList) throws ContainerNotFoundException { ContainerInfo containerInfo = containerManager .getContainer(ContainerID.valueOf(txn.getContainerID())); ReplicationManager replicationManager = scmContext.getScm().getReplicationManager(); ContainerHealthResult result = replicationManager .getContainerReplicationHealth(containerInfo, replicas); + + // We have made an improvement here, and we expect that all replicas + // of the Container being sent will be included in the dnList. + // This change benefits ACK confirmation and improves deletion speed. + // The principle behind it is that + // DN can receive the command to delete a certain Container at the same time and provide + // feedback to SCM at roughly the same time. + // This avoids the issue of deletion blocking, + // where some replicas of a Container are deleted while others do not receive the delete command. + long containerId = txn.getContainerID(); + for (ContainerReplica replica : replicas) { + DatanodeDetails datanodeDetails = replica.getDatanodeDetails(); + if (!dnList.contains(datanodeDetails)) { + DatanodeDetails dnDetail = replica.getDatanodeDetails(); + LOG.debug("Skip Container = {}, because DN = {} is not in dnList.", + containerId, dnDetail.getUuid()); + return true; + } + } + return result.getHealthState() != ContainerHealthResult.HealthState.HEALTHY; } @@ -338,6 +345,34 @@ public DatanodeDeletedBlockTransactions getTransactions( try (TableIterator> iter = deletedBlockLogStateManager.getReadOnlyIterator()) { + if (lastProcessedTransactionId != -1) { + iter.seek(lastProcessedTransactionId); + /* + * We should start from (lastProcessedTransactionId + 1) transaction. + * Now the iterator (iter.next call) is pointing at + * lastProcessedTransactionId, read the current value to move + * the cursor. + */ + if (iter.hasNext()) { + /* + * There is a possibility that the lastProcessedTransactionId got + * deleted from the table, in that case we have to set + * lastProcessedTransactionId to next available transaction in the table. + * + * By doing this there is a chance that we will skip processing the new + * lastProcessedTransactionId, that should be ok. We can get to it in the + * next run. + */ + lastProcessedTransactionId = iter.next().getKey(); + } + + // If we have reached the end, go to beginning. 
+ if (!iter.hasNext()) { + iter.seekToFirst(); + lastProcessedTransactionId = -1; + } + } + // Get the CmdStatus status of the aggregation, so that the current // status of the specified transaction can be found faster Map> commandStatus = @@ -345,39 +380,60 @@ public DatanodeDeletedBlockTransactions getTransactions( .getCommandStatusByTxId(dnList.stream(). map(DatanodeDetails::getUuid).collect(Collectors.toSet())); ArrayList txIDs = new ArrayList<>(); + metrics.setNumBlockDeletionTransactionDataNodes(dnList.size()); + Table.KeyValue keyValue = null; // Here takes block replica count as the threshold to avoid the case // that part of replicas committed the TXN and recorded in the // SCMDeletedBlockTransactionStatusManager, while they are counted // in the threshold. while (iter.hasNext() && transactions.getBlocksDeleted() < blockDeletionLimit) { - Table.KeyValue keyValue = iter.next(); + keyValue = iter.next(); DeletedBlocksTransaction txn = keyValue.getValue(); final ContainerID id = ContainerID.valueOf(txn.getContainerID()); try { // HDDS-7126. When container is under replicated, it is possible // that container is deleted, but transactions are not deleted. if (containerManager.getContainer(id).isDeleted()) { - LOG.warn("Container: " + id + " was deleted for the " + - "transaction: " + txn); + LOG.warn("Container: {} was deleted for the " + + "transaction: {}.", id, txn); txIDs.add(txn.getTxID()); } else if (txn.getCount() > -1 && txn.getCount() <= maxRetry && !containerManager.getContainer(id).isOpen()) { Set replicas = containerManager .getContainerReplicas( ContainerID.valueOf(txn.getContainerID())); - if (checkInadequateReplica(replicas, txn)) { + if (checkInadequateReplica(replicas, txn, dnList)) { + metrics.incrSkippedTransaction(); continue; } getTransaction( txn, transactions, dnList, replicas, commandStatus); + } else if (txn.getCount() >= maxRetry || containerManager.getContainer(id).isOpen()) { + metrics.incrSkippedTransaction(); } } catch (ContainerNotFoundException ex) { - LOG.warn("Container: " + id + " was not found for the transaction: " - + txn); + LOG.warn("Container: {} was not found for the transaction: {}.", id, txn); txIDs.add(txn.getTxID()); } + + if (lastProcessedTransactionId == keyValue.getKey()) { + // We have circled back to the last transaction. + break; + } + + if (!iter.hasNext() && lastProcessedTransactionId != -1) { + /* + * We started from in-between and reached end of the table, + * now we should go to the start of the table and process + * the transactions. + */ + iter.seekToFirst(); + } } + + lastProcessedTransactionId = keyValue != null ? 
keyValue.getKey() : -1; + if (!txIDs.isEmpty()) { deletedBlockLogStateManager.removeTransactionsFromDB(txIDs); metrics.incrBlockDeletionTransactionCompleted(txIDs.size()); @@ -436,8 +492,10 @@ public void onMessage( getSCMDeletedBlockTransactionStatusManager() .commitTransactions(ackProto.getResultsList(), dnId); metrics.incrBlockDeletionCommandSuccess(); + metrics.incrDNCommandsSuccess(dnId, 1); } else if (status == CommandStatus.Status.FAILED) { metrics.incrBlockDeletionCommandFailure(); + metrics.incrDNCommandsFailure(dnId, 1); } else { LOG.debug("Delete Block Command {} is not executed on the Datanode" + " {}.", commandStatus.getCmdId(), dnId); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java index ceeb2786135..43809acf4bc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java @@ -133,7 +133,8 @@ public void close() throws IOException { @Override public void seekToFirst() { - throw new UnsupportedOperationException("seekToFirst"); + iter.seekToFirst(); + findNext(); } @Override @@ -144,7 +145,9 @@ public void seekToLast() { @Override public TypedTable.KeyValue seek( Long key) throws IOException { - throw new UnsupportedOperationException("seek"); + iter.seek(key); + findNext(); + return nextTx; } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java index 7271d9dcba6..e3f80f8fd40 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java @@ -167,8 +167,9 @@ public EmptyTaskResult call() throws Exception { // These nodes will be considered for this iteration. 
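The getTransactions changes above turn the scan of the deleted-block table into a resumable, wrap-around pass: seek just past lastProcessedTransactionId, fall back to the first entry when the end of the table is reached, and stop once the scan circles back to its starting point or the block budget is used up. A minimal sketch of that cursor pattern, assuming a NavigableMap stands in for the RocksDB-backed table and its seek/seekToFirst iterator:

import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;

final class RoundRobinTxnScanSketch {

  // -1 means "no previous run"; the first scan starts at the lowest id.
  private long lastProcessedTxId = -1;

  /**
   * @param txnBlockCounts transaction id -> number of blocks in that transaction
   * @param blockBudget    stop adding transactions once this many blocks are picked
   * @return transaction ids chosen for this run, in scan order
   */
  List<Long> nextBatch(NavigableMap<Long, Integer> txnBlockCounts, int blockBudget) {
    List<Long> picked = new ArrayList<>();
    if (txnBlockCounts.isEmpty()) {
      return picked;
    }
    // Resume strictly after the last processed id; wrap if nothing follows it.
    Long start = txnBlockCounts.higherKey(lastProcessedTxId);
    if (start == null) {
      start = txnBlockCounts.firstKey();
    }
    long blocks = 0;
    Long key = start;
    while (blocks < blockBudget) {
      blocks += txnBlockCounts.get(key);
      picked.add(key);
      lastProcessedTxId = key;
      // Advance, wrapping to the first entry when the end of the table is hit.
      Long next = txnBlockCounts.higherKey(key);
      if (next == null) {
        next = txnBlockCounts.firstKey();
      }
      if (next.equals(start)) {
        break;   // circled back to where this run started
      }
      key = next;
    }
    return picked;
  }
}

Each run therefore resumes where the previous one stopped instead of always rescanning from the lowest transaction id, which is what lets transactions later in the table get their turn within the per-run block limit.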
final Set included = getDatanodesWithinCommandLimit(datanodes); + int blockDeletionLimit = getBlockDeleteTXNum(); DatanodeDeletedBlockTransactions transactions = - deletedBlockLog.getTransactions(getBlockDeleteTXNum(), included); + deletedBlockLog.getTransactions(blockDeletionLimit, included); if (transactions.isEmpty()) { return EmptyTaskResult.newResult(); @@ -192,6 +193,7 @@ public EmptyTaskResult call() throws Exception { new CommandForDatanode<>(dnId, command)); metrics.incrBlockDeletionCommandSent(); metrics.incrBlockDeletionTransactionSent(dnTXs.size()); + metrics.incrDNCommandsSent(dnId, 1); if (LOG.isDebugEnabled()) { LOG.debug( "Added delete block command for datanode {} in the queue," @@ -203,9 +205,11 @@ public EmptyTaskResult call() throws Exception { } } LOG.info("Totally added {} blocks to be deleted for" - + " {} datanodes, task elapsed time: {}ms", + + " {} datanodes / {} totalnodes, limit per iteration : {}blocks, task elapsed time: {}ms", transactions.getBlocksDeleted(), transactions.getDatanodeTransactionMap().size(), + included.size(), + blockDeletionLimit, Time.monotonicNow() - startTime); deletedBlockLog.incrementCount(new ArrayList<>(processedTxIDs)); } catch (NotLeaderException nle) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java index 2cadca1d92a..d4dd7933ea7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java @@ -19,22 +19,36 @@ package org.apache.hadoop.hdds.scm.block; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; + +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.Interns; + +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; /** * Metrics related to Block Deleting Service running in SCM. */ @Metrics(name = "ScmBlockDeletingService Metrics", about = "Metrics related to " + "background block deleting service in SCM", context = "SCM") -public final class ScmBlockDeletingServiceMetrics { +public final class ScmBlockDeletingServiceMetrics implements MetricsSource { private static ScmBlockDeletingServiceMetrics instance; public static final String SOURCE_NAME = SCMBlockDeletingService.class.getSimpleName(); + private final MetricsRegistry registry; /** * Given all commands are finished and no new coming deletes from OM. 
@@ -76,10 +90,22 @@ public final class ScmBlockDeletingServiceMetrics { @Metric(about = "The number of created txs which are added into DB.") private MutableCounterLong numBlockDeletionTransactionCreated; + @Metric(about = "The number of skipped transactions") + private MutableCounterLong numSkippedTransactions; + + @Metric(about = "The number of processed transactions") + private MutableCounterLong numProcessedTransactions; + + @Metric(about = "The number of dataNodes of delete transactions.") + private MutableGaugeLong numBlockDeletionTransactionDataNodes; + + private final Map numCommandsDatanode = new ConcurrentHashMap<>(); + private ScmBlockDeletingServiceMetrics() { + this.registry = new MetricsRegistry(SOURCE_NAME); } - public static ScmBlockDeletingServiceMetrics create() { + public static synchronized ScmBlockDeletingServiceMetrics create() { if (instance == null) { MetricsSystem ms = DefaultMetricsSystem.instance(); instance = ms.register(SOURCE_NAME, "SCMBlockDeletingService", @@ -130,6 +156,31 @@ public void incrBlockDeletionTransactionCreated(long count) { this.numBlockDeletionTransactionCreated.incr(count); } + public void incrSkippedTransaction() { + this.numSkippedTransactions.incr(); + } + + public void incrProcessedTransaction() { + this.numProcessedTransactions.incr(); + } + + public void setNumBlockDeletionTransactionDataNodes(long dataNodes) { + this.numBlockDeletionTransactionDataNodes.set(dataNodes); + } + + public void incrDNCommandsSent(UUID id, long delta) { + numCommandsDatanode.computeIfAbsent(id, k -> new DatanodeCommandCounts()) + .incrCommandsSent(delta); + } + public void incrDNCommandsSuccess(UUID id, long delta) { + numCommandsDatanode.computeIfAbsent(id, k -> new DatanodeCommandCounts()) + .incrCommandsSuccess(delta); + } + public void incrDNCommandsFailure(UUID id, long delta) { + numCommandsDatanode.computeIfAbsent(id, k -> new DatanodeCommandCounts()) + .incrCommandsFailure(delta); + } + public long getNumBlockDeletionCommandSent() { return numBlockDeletionCommandSent.value(); } @@ -162,6 +213,124 @@ public long getNumBlockDeletionTransactionCreated() { return numBlockDeletionTransactionCreated.value(); } + public long getNumSkippedTransactions() { + return numSkippedTransactions.value(); + } + + public long getNumProcessedTransactions() { + return numProcessedTransactions.value(); + } + + public long getNumBlockDeletionTransactionDataNodes() { + return numBlockDeletionTransactionDataNodes.value(); + } + + @Override + public void getMetrics(MetricsCollector metricsCollector, boolean all) { + MetricsRecordBuilder builder = metricsCollector.addRecord(SOURCE_NAME); + numBlockDeletionCommandSent.snapshot(builder, all); + numBlockDeletionCommandSuccess.snapshot(builder, all); + numBlockDeletionCommandFailure.snapshot(builder, all); + numBlockDeletionTransactionSent.snapshot(builder, all); + numBlockDeletionTransactionSuccess.snapshot(builder, all); + numBlockDeletionTransactionFailure.snapshot(builder, all); + numBlockDeletionTransactionCompleted.snapshot(builder, all); + numBlockDeletionTransactionCreated.snapshot(builder, all); + numSkippedTransactions.snapshot(builder, all); + numProcessedTransactions.snapshot(builder, all); + numBlockDeletionTransactionDataNodes.snapshot(builder, all); + + MetricsRecordBuilder recordBuilder = builder; + for (Map.Entry e : numCommandsDatanode.entrySet()) { + recordBuilder = recordBuilder.endRecord().addRecord(SOURCE_NAME) + .add(new MetricsTag(Interns.info("datanode", + "Datanode host for deletion commands"), 
e.getKey().toString())) + .addGauge(DatanodeCommandCounts.COMMANDS_SENT_TO_DN, + e.getValue().getCommandsSent()) + .addGauge(DatanodeCommandCounts.COMMANDS_SUCCESSFUL_EXECUTION_BY_DN, + e.getValue().getCommandsSuccess()) + .addGauge(DatanodeCommandCounts.COMMANDS_FAILED_EXECUTION_BY_DN, + e.getValue().getCommandsFailure()); + } + recordBuilder.endRecord(); + } + + /** + * Class contains metrics related to the ScmBlockDeletingService for each datanode. + */ + public static final class DatanodeCommandCounts { + private long commandsSent; + private long commandsSuccess; + private long commandsFailure; + + private static final MetricsInfo COMMANDS_SENT_TO_DN = Interns.info( + "CommandsSent", + "Number of commands sent from SCM to the datanode for deletion"); + private static final MetricsInfo COMMANDS_SUCCESSFUL_EXECUTION_BY_DN = Interns.info( + "CommandsSuccess", + "Number of commands sent from SCM to the datanode for deletion for which execution succeeded."); + private static final MetricsInfo COMMANDS_FAILED_EXECUTION_BY_DN = Interns.info( + "CommandsFailed", + "Number of commands sent from SCM to the datanode for deletion for which execution failed."); + + public DatanodeCommandCounts() { + this.commandsSent = 0; + this.commandsSuccess = 0; + this.commandsFailure = 0; + } + + public void incrCommandsSent(long delta) { + this.commandsSent += delta; + } + + public void incrCommandsSuccess(long delta) { + this.commandsSuccess += delta; + } + + public void incrCommandsFailure(long delta) { + this.commandsFailure += delta; + } + + public long getCommandsSent() { + return commandsSent; + } + + public long getCommandsSuccess() { + return commandsSuccess; + } + + public long getCommandsFailure() { + return commandsFailure; + } + + @Override + public String toString() { + return "Sent=" + commandsSent + ", Success=" + commandsSuccess + ", Failed=" + commandsFailure; + } + } + + public long getNumCommandsDatanodeSent() { + long sent = 0; + for (DatanodeCommandCounts v : numCommandsDatanode.values()) { + sent += v.commandsSent; + } + return sent; + } + public long getNumCommandsDatanodeSuccess() { + long successCount = 0; + for (DatanodeCommandCounts v : numCommandsDatanode.values()) { + successCount += v.commandsSuccess; + } + return successCount; + } + public long getNumCommandsDatanodeFailed() { + long failCount = 0; + for (DatanodeCommandCounts v : numCommandsDatanode.values()) { + failCount += v.commandsFailure; + } + return failCount; + } + @Override public String toString() { StringBuffer buffer = new StringBuffer(); @@ -180,7 +349,9 @@ public String toString() { .append("numBlockDeletionTransactionSuccess = " + numBlockDeletionTransactionSuccess.value()).append("\t") .append("numBlockDeletionTransactionFailure = " - + numBlockDeletionTransactionFailure.value()); + + numBlockDeletionTransactionFailure.value()).append("\t") + .append("numDeletionCommandsPerDatanode = " + + numCommandsDatanode); return buffer.toString(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 3eba240533e..6b6a888f424 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -61,9 +61,9 @@ default List getContainers() { * The max size of the searching range cannot exceed the * value of count. 
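ScmBlockDeletingServiceMetrics above publishes its per-datanode command counters by emitting one extra metrics record per datanode, distinguished by a "datanode" tag. A stripped-down sketch of that one-record-per-tag pattern with hypothetical class and metric names (not the production class, which also tracks success and failure counts):

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

final class PerDatanodeCommandCounterSketch implements MetricsSource {

  private static final String SOURCE = "PerDatanodeCommandCounterSketch";

  // datanode id -> number of delete commands sent to it
  private final Map<String, LongAdder> sentPerDatanode = new ConcurrentHashMap<>();

  void incrCommandsSent(String datanodeId) {
    sentPerDatanode.computeIfAbsent(datanodeId, k -> new LongAdder()).increment();
  }

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // One record per datanode; the tag is what keeps the records apart.
    for (Map.Entry<String, LongAdder> e : sentPerDatanode.entrySet()) {
      collector.addRecord(SOURCE)
          .add(new MetricsTag(
              Interns.info("datanode", "Datanode the delete commands were sent to"),
              e.getKey()))
          .addGauge(
              Interns.info("CommandsSent", "Delete commands sent to this datanode"),
              e.getValue().longValue());
    }
  }

  PerDatanodeCommandCounterSketch register() {
    return DefaultMetricsSystem.instance()
        .register(SOURCE, "Per-datanode command counters (sketch)", this);
  }
}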
* - * @param startID start containerID, >=0, + * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @@ -85,9 +85,9 @@ default List getContainers() { * The max size of the searching range cannot exceed the * value of count. * - * @param startID start containerID, >=0, + * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @param state container state @@ -164,7 +164,6 @@ void updateContainerReplica(ContainerID containerID, ContainerReplica replica) * * @param containerID Container ID * @param replica ContainerReplica - * @return True of dataNode is removed successfully else false. */ void removeContainerReplica(ContainerID containerID, ContainerReplica replica) throws ContainerNotFoundException, ContainerReplicaNotFoundException; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 00aee0f62c2..d61f9ee366b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -86,6 +86,8 @@ public class ContainerManagerImpl implements ContainerManager { @SuppressWarnings("java:S2245") // no need for secure random private final Random random = new Random(); + private int maxCountOfContainerList; + /** * */ @@ -115,6 +117,10 @@ public ContainerManagerImpl( .getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); + this.maxCountOfContainerList = conf + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java index daadcd824ec..36a51c4e3ca 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java @@ -26,11 +26,13 @@ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.container.report.ContainerReportValidator; +import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher .ContainerReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; 
@@ -199,6 +201,11 @@ public void onMessage(final ContainerReportFromDatanode reportFromDatanode, // list processMissingReplicas(datanodeDetails, expectedContainersInDatanode); containerManager.notifyContainerReportProcessing(true, true); + if (reportFromDatanode.isRegister()) { + publisher.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, + new SCMDatanodeProtocolServer.NodeRegistrationContainerReport(datanodeDetails, + reportFromDatanode.getReport())); + } } } catch (NodeNotFoundException ex) { containerManager.notifyContainerReportProcessing(true, false); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java index dd2d1c57894..df45ffd9b62 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java @@ -32,11 +32,11 @@ import org.slf4j.Logger; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; /** @@ -56,7 +56,7 @@ protected AbstractFindTargetGreedy( ContainerManager containerManager, PlacementPolicyValidateProxy placementPolicyValidateProxy, NodeManager nodeManager) { - sizeEnteringNode = new HashMap<>(); + sizeEnteringNode = new ConcurrentHashMap<>(); this.containerManager = containerManager; this.placementPolicyValidateProxy = placementPolicyValidateProxy; this.nodeManager = nodeManager; @@ -283,4 +283,9 @@ NodeManager getNodeManager() { public Map getSizeEnteringNodes() { return sizeEnteringNode; } + + @Override + public void clearSizeEnteringNodes() { + sizeEnteringNode.clear(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java index 3dddd67bd8a..2f6b8a7f814 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java @@ -183,17 +183,19 @@ public ContainerBalancerTask.Status getBalancerStatus() { * @return balancer status info if balancer started */ public ContainerBalancerStatusInfo getBalancerStatusInfo() throws IOException { - if (isBalancerRunning()) { - ContainerBalancerConfigurationProto configProto = readConfiguration(ContainerBalancerConfigurationProto.class); - return new ContainerBalancerStatusInfo( - this.startedAt, - configProto, - task.getCurrentIterationsStatistic() - ); - } else { + lock.lock(); + try { + if (isBalancerRunning()) { + return new ContainerBalancerStatusInfo( + this.startedAt, + config.toProtobufBuilder().setShouldRun(true).build(), + task.getCurrentIterationsStatistic() + ); + } return null; + } finally { + lock.unlock(); } - } /** * Checks if ContainerBalancer is in valid state to call stop. 
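getBalancerStatusInfo above now takes the balancer lock before checking whether the balancer is running and building the status from config and task. A minimal sketch of that guarded compound read, with hypothetical names; the point of the change appears to be that the running check and the snapshot cannot interleave with a concurrent stop:

import java.util.concurrent.locks.ReentrantLock;

final class GuardedStatusSketch {

  private final ReentrantLock lock = new ReentrantLock();
  private volatile Object runningTask;        // non-null only while a task runs

  String getStatusInfo() {
    lock.lock();
    try {
      if (runningTask == null) {
        return null;                          // not running, nothing to report
      }
      // Both the running check and the snapshot happen under the same lock
      // that stop() takes, so a half-stopped state is never observed.
      return "running: " + runningTask;
    } finally {
      lock.unlock();
    }
  }

  void stop() {
    lock.lock();
    try {
      runningTask = null;
    } finally {
      lock.unlock();
    }
  }
}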
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java index 6446089db35..3e164cb0bba 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java @@ -40,6 +40,9 @@ public final class ContainerBalancerMetrics { " in the latest iteration.") private MutableCounterLong dataSizeMovedGBInLatestIteration; + @Metric(about = "Amount of bytes that the Container Balancer moved in the latest iteration.") + private MutableCounterLong dataSizeMovedBytesInLatestIteration; + @Metric(about = "Number of completed container moves performed by " + "Container Balancer in the latest iteration.") private MutableCounterLong numContainerMovesCompletedInLatestIteration; @@ -131,14 +134,16 @@ void incrementNumContainerMovesScheduledInLatestIteration(long valueToAdd) { this.numContainerMovesScheduledInLatestIteration.incr(valueToAdd); } + /** + * Reset the number of containers scheduled to move in the last iteration. + */ public void resetNumContainerMovesScheduledInLatestIteration() { numContainerMovesScheduledInLatestIteration.incr( -getNumContainerMovesScheduledInLatestIteration()); } /** - * Gets the amount of data moved by Container Balancer in the latest - * iteration. + * Retrieves the amount of data moved by the Container Balancer in the latest iteration. * @return size in GB */ public long getDataSizeMovedGBInLatestIteration() { @@ -154,6 +159,29 @@ public void resetDataSizeMovedGBInLatestIteration() { -getDataSizeMovedGBInLatestIteration()); } + /** + * Retrieves the amount of data moved by the Container Balancer in the latest iteration. + * @return size in bytes + */ + public long getDataSizeMovedInLatestIteration() { + return dataSizeMovedBytesInLatestIteration.value(); + } + + /** + * Increment the amount of data moved in the last iteration. + * @param bytes bytes to add + */ + public void incrementDataSizeMovedInLatestIteration(long bytes) { + this.dataSizeMovedBytesInLatestIteration.incr(bytes); + } + + /** + * Reset the amount of data moved in the last iteration. + */ + public void resetDataSizeMovedInLatestIteration() { + dataSizeMovedBytesInLatestIteration.incr(-getDataSizeMovedInLatestIteration()); + } + /** * Gets the number of container moves performed by Container Balancer in the * latest iteration. @@ -163,11 +191,6 @@ public long getNumContainerMovesCompletedInLatestIteration() { return numContainerMovesCompletedInLatestIteration.value(); } - public void incrementNumContainerMovesCompletedInLatestIteration( - long valueToAdd) { - this.numContainerMovesCompletedInLatestIteration.incr(valueToAdd); - } - public void incrementCurrentIterationContainerMoveMetric( MoveManager.MoveResult result, long valueToAdd) { if (result == null) { @@ -204,9 +227,11 @@ public void incrementCurrentIterationContainerMoveMetric( } } + /** + * Reset the number of containers moved in the last iteration. 
+ */ public void resetNumContainerMovesCompletedInLatestIteration() { - numContainerMovesCompletedInLatestIteration.incr( - -getNumContainerMovesCompletedInLatestIteration()); + numContainerMovesCompletedInLatestIteration.incr(-getNumContainerMovesCompletedInLatestIteration()); } /** @@ -218,14 +243,19 @@ public long getNumContainerMovesTimeoutInLatestIteration() { return numContainerMovesTimeoutInLatestIteration.value(); } + /** + * Increases the number of timeout container moves in the latest iteration. + */ public void incrementNumContainerMovesTimeoutInLatestIteration( long valueToAdd) { this.numContainerMovesTimeoutInLatestIteration.incr(valueToAdd); } + /** + * Reset the number of timeout container moves in the latest iteration. + */ public void resetNumContainerMovesTimeoutInLatestIteration() { - numContainerMovesTimeoutInLatestIteration.incr( - -getNumContainerMovesTimeoutInLatestIteration()); + numContainerMovesTimeoutInLatestIteration.incr(-getNumContainerMovesTimeoutInLatestIteration()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java index 3965a159323..380b019f89a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -143,18 +142,6 @@ private Comparator orderContainersByUsedBytes() { return this::isContainerMoreUsed; } - /** - * Checks whether a Container has the ReplicationType - * {@link HddsProtos.ReplicationType#EC} and the Legacy Replication Manger is enabled. - * @param container container to check - * @return true if the ReplicationType is EC and "hdds.scm.replication - * .enable.legacy" is true, else false - */ - private boolean isECContainerAndLegacyRMEnabled(ContainerInfo container) { - return container.getReplicationType().equals(HddsProtos.ReplicationType.EC) - && replicationManager.getConfig().isLegacyEnabled(); - } - /** * Gets containers that are suitable for moving based on the following * required criteria: @@ -163,7 +150,6 @@ private boolean isECContainerAndLegacyRMEnabled(ContainerInfo container) { * 3. Container size should be closer to 5GB. * 4. Container must not be in the configured exclude containers list. * 5. Container should be closed. - * 6. If the {@link LegacyReplicationManager} is enabled, then the container should not be an EC container. * @param node DatanodeDetails for which to find candidate containers. 
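Several of the ContainerBalancerMetrics reset methods above rely on the incr(-get()) idiom. A small sketch of why: MutableCounterLong has no setter, so the only way to bring a per-iteration counter back to zero is to increment it by the negated current value. This is a hypothetical class constructed directly for illustration; the production metrics fields are created through @Metric annotations when the source is registered:

import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

final class IterationCounterSketch {

  private final MutableCounterLong movedBytesInLatestIteration =
      new MutableCounterLong(
          Interns.info("DataSizeMovedBytesInLatestIteration",
              "Bytes moved by the balancer in the latest iteration"), 0L);

  void add(long bytes) {
    movedBytesInLatestIteration.incr(bytes);
  }

  long current() {
    return movedBytesInLatestIteration.value();
  }

  // MutableCounterLong has no set()/reset(), so a per-iteration "reset" is an
  // increment by the negated current value, mirroring the methods above.
  void resetForNextIteration() {
    movedBytesInLatestIteration.incr(-current());
  }
}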
* @return true if the container should be excluded, else false */ @@ -179,7 +165,7 @@ public boolean shouldBeExcluded(ContainerID containerID, } return excludeContainers.contains(containerID) || excludeContainersDueToFailure.contains(containerID) || containerToSourceMap.containsKey(containerID) || - !isContainerClosed(container, node) || isECContainerAndLegacyRMEnabled(container) || + !isContainerClosed(container, node) || isContainerReplicatingOrDeleting(containerID) || !findSourceStrategy.canSizeLeaveSource(node, container.getUsedBytes()) || breaksMaxSizeToMoveLimit(container.containerID(), diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerStatusInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerStatusInfo.java index cbe8385e53a..a0552142b3b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerStatusInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerStatusInfo.java @@ -19,9 +19,11 @@ package org.apache.hadoop.hdds.scm.container.balancer; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; import java.time.OffsetDateTime; import java.util.List; +import java.util.stream.Collectors; /** * Info about balancer status. @@ -51,4 +53,21 @@ public HddsProtos.ContainerBalancerConfigurationProto getConfiguration() { public List getIterationsStatusInfo() { return iterationsStatusInfo; } + + /** + * Converts an instance into a protobuf-compatible object. + * @return proto representation + */ + public StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoProto toProto() { + return StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoProto + .newBuilder() + .setStartedAt(getStartedAt().toEpochSecond()) + .setConfiguration(getConfiguration()) + .addAllIterationsStatusInfo( + getIterationsStatusInfo() + .stream() + .map(ContainerBalancerTaskIterationStatusInfo::toProto) + .collect(Collectors.toList()) + ).build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 7fea44671ff..54c0f4d81e7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -37,12 +37,12 @@ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.time.Duration; +import java.time.OffsetDateTime; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -52,16 +52,23 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Queue; import java.util.Set; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import 
java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; +import static java.time.OffsetDateTime.now; +import static java.util.Collections.emptyMap; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL_DEFAULT; +import static org.apache.hadoop.util.StringUtils.byteDesc; /** * Container balancer task performs move of containers between over- and @@ -71,6 +78,7 @@ public class ContainerBalancerTask implements Runnable { public static final Logger LOG = LoggerFactory.getLogger(ContainerBalancerTask.class); + public static final long ABSENCE_OF_DURATION = -1L; private NodeManager nodeManager; private ContainerManager containerManager; @@ -100,7 +108,6 @@ public class ContainerBalancerTask implements Runnable { private double lowerLimit; private ContainerBalancerSelectionCriteria selectionCriteria; private volatile Status taskStatus = Status.RUNNING; - /* Since a container can be selected only once during an iteration, these maps use it as a primary key to track source to target pairings. @@ -117,7 +124,9 @@ public class ContainerBalancerTask implements Runnable { private IterationResult iterationResult; private int nextIterationIndex; private boolean delayStart; - private List iterationsStatistic; + private Queue iterationsStatistic; + private OffsetDateTime currentIterationStarted; + private AtomicBoolean isCurrentIterationInProgress = new AtomicBoolean(false); /** * Constructs ContainerBalancerTask with the specified arguments. @@ -158,7 +167,15 @@ public ContainerBalancerTask(StorageContainerManager scm, this.selectedSources = new HashSet<>(); this.selectedTargets = new HashSet<>(); findSourceStrategy = new FindSourceGreedy(nodeManager); - this.iterationsStatistic = new ArrayList<>(); + if (config.getNetworkTopologyEnable()) { + findTargetStrategy = new FindTargetGreedyByNetworkTopology( + containerManager, placementPolicyValidateProxy, + nodeManager, networkTopology); + } else { + findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager, + placementPolicyValidateProxy, nodeManager); + } + this.iterationsStatistic = new ConcurrentLinkedQueue<>(); } /** @@ -207,6 +224,10 @@ private void balance() { // leader change or restart int i = nextIterationIndex; for (; i < iterations && isBalancerRunning(); i++) { + currentIterationStarted = now(); + + isCurrentIterationInProgress.compareAndSet(false, true); + // reset some variables and metrics for this iteration resetState(); if (config.getTriggerDuEnable()) { @@ -253,21 +274,29 @@ private void balance() { return; } - IterationResult iR = doIteration(); - saveIterationStatistic(i, iR); + IterationResult currentIterationResult = doIteration(); + ContainerBalancerTaskIterationStatusInfo iterationStatistic = + getIterationStatistic(i + 1, currentIterationResult, getCurrentIterationDuration()); + iterationsStatistic.offer(iterationStatistic); + + isCurrentIterationInProgress.compareAndSet(true, false); + + findTargetStrategy.clearSizeEnteringNodes(); + findSourceStrategy.clearSizeLeavingNodes(); + metrics.incrementNumIterations(1); - LOG.info("Result of this iteration of Container Balancer: {}", iR); + LOG.info("Result of this iteration of Container Balancer: {}", currentIterationResult); // if no new move option is generated, it means the cluster cannot be // balanced anymore; so just stop balancer - if (iR == 
IterationResult.CAN_NOT_BALANCE_ANY_MORE) { - tryStopWithSaveConfiguration(iR.toString()); + if (currentIterationResult == IterationResult.CAN_NOT_BALANCE_ANY_MORE) { + tryStopWithSaveConfiguration(currentIterationResult.toString()); return; } // persist next iteration index - if (iR == IterationResult.ITERATION_COMPLETED) { + if (currentIterationResult == IterationResult.ITERATION_COMPLETED) { try { saveConfiguration(config, true, i + 1); } catch (IOException | TimeoutException e) { @@ -298,83 +327,143 @@ private void balance() { tryStopWithSaveConfiguration("Completed all iterations."); } - private void saveIterationStatistic(Integer iterationNumber, IterationResult iR) { - ContainerBalancerTaskIterationStatusInfo iterationStatistic = new ContainerBalancerTaskIterationStatusInfo( - iterationNumber, - iR.name(), - getSizeScheduledForMoveInLatestIteration() / OzoneConsts.GB, - metrics.getDataSizeMovedGBInLatestIteration(), - metrics.getNumContainerMovesScheduledInLatestIteration(), - metrics.getNumContainerMovesCompletedInLatestIteration(), - metrics.getNumContainerMovesFailedInLatestIteration(), - metrics.getNumContainerMovesTimeoutInLatestIteration(), - findTargetStrategy.getSizeEnteringNodes() - .entrySet() - .stream() - .filter(Objects::nonNull) - .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) - .collect( - Collectors.toMap( - entry -> entry.getKey().getUuid(), - entry -> entry.getValue() / OzoneConsts.GB - ) - ), - findSourceStrategy.getSizeLeavingNodes() - .entrySet() - .stream() - .filter(Objects::nonNull) - .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) - .collect( - Collectors.toMap( - entry -> entry.getKey().getUuid(), - entry -> entry.getValue() / OzoneConsts.GB - ) - ) + private ContainerBalancerTaskIterationStatusInfo getIterationStatistic(Integer iterationNumber, + IterationResult currentIterationResult, + long iterationDuration) { + String currentIterationResultName = currentIterationResult == null ? 
null : currentIterationResult.name(); + Map sizeEnteringDataToNodes = + convertToNodeIdToTrafficMap(findTargetStrategy.getSizeEnteringNodes()); + Map sizeLeavingDataFromNodes = + convertToNodeIdToTrafficMap(findSourceStrategy.getSizeLeavingNodes()); + IterationInfo iterationInfo = new IterationInfo( + iterationNumber, + currentIterationResultName, + iterationDuration ); - iterationsStatistic.add(iterationStatistic); + ContainerMoveInfo containerMoveInfo = new ContainerMoveInfo(metrics); + + DataMoveInfo dataMoveInfo = + getDataMoveInfo(currentIterationResultName, sizeEnteringDataToNodes, sizeLeavingDataFromNodes); + return new ContainerBalancerTaskIterationStatusInfo(iterationInfo, containerMoveInfo, dataMoveInfo); + } + + private DataMoveInfo getDataMoveInfo(String currentIterationResultName, Map sizeEnteringDataToNodes, + Map sizeLeavingDataFromNodes) { + if (currentIterationResultName == null) { + // For unfinished iteration + return new DataMoveInfo( + getSizeScheduledForMoveInLatestIteration(), + sizeActuallyMovedInLatestIteration, + sizeEnteringDataToNodes, + sizeLeavingDataFromNodes + ); + } else { + // For finished iteration + return new DataMoveInfo( + getSizeScheduledForMoveInLatestIteration(), + metrics.getDataSizeMovedInLatestIteration(), + sizeEnteringDataToNodes, + sizeLeavingDataFromNodes + ); + } } + private Map convertToNodeIdToTrafficMap(Map nodeTrafficMap) { + return nodeTrafficMap + .entrySet() + .stream() + .filter(Objects::nonNull) + .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) + .collect( + Collectors.toMap( + entry -> entry.getKey().getUuid(), + Map.Entry::getValue + ) + ); + } + + /** + * Get current iteration statistics. + * @return current iteration statistic + */ public List getCurrentIterationsStatistic() { + List resultList = new ArrayList<>(iterationsStatistic); + ContainerBalancerTaskIterationStatusInfo currentIterationStatistic = createCurrentIterationStatistic(); + if (currentIterationStatistic != null) { + resultList.add(currentIterationStatistic); + } + return resultList; + } - int lastIterationNumber = iterationsStatistic.stream() + private ContainerBalancerTaskIterationStatusInfo createCurrentIterationStatistic() { + List resultList = new ArrayList<>(iterationsStatistic); + + int lastIterationNumber = resultList.stream() .mapToInt(ContainerBalancerTaskIterationStatusInfo::getIterationNumber) .max() .orElse(0); + long iterationDuration = getCurrentIterationDuration(); - ContainerBalancerTaskIterationStatusInfo currentIterationStatistic = new ContainerBalancerTaskIterationStatusInfo( + if (isCurrentIterationInProgress.get()) { + return getIterationStatistic(lastIterationNumber + 1, null, iterationDuration); + } else { + return null; + } + } + + private static ContainerBalancerTaskIterationStatusInfo getEmptyCurrentIterationStatistic( + long iterationDuration) { + ContainerMoveInfo containerMoveInfo = new ContainerMoveInfo(0, 0, 0, 0); + DataMoveInfo dataMoveInfo = new DataMoveInfo( + 0, + 0, + emptyMap(), + emptyMap() + ); + IterationInfo iterationInfo = new IterationInfo( + 0, + null, + iterationDuration + ); + return new ContainerBalancerTaskIterationStatusInfo( + iterationInfo, + containerMoveInfo, + dataMoveInfo + ); + } + + private ContainerBalancerTaskIterationStatusInfo getFilledCurrentIterationStatistic(int lastIterationNumber, + long iterationDuration) { + Map sizeEnteringDataToNodes = + convertToNodeIdToTrafficMap(findTargetStrategy.getSizeEnteringNodes()); + Map sizeLeavingDataFromNodes = + 
convertToNodeIdToTrafficMap(findSourceStrategy.getSizeLeavingNodes()); + + ContainerMoveInfo containerMoveInfo = new ContainerMoveInfo(metrics); + DataMoveInfo dataMoveInfo = new DataMoveInfo( + getSizeScheduledForMoveInLatestIteration(), + sizeActuallyMovedInLatestIteration, + sizeEnteringDataToNodes, + sizeLeavingDataFromNodes + ); + IterationInfo iterationInfo = new IterationInfo( lastIterationNumber + 1, null, - getSizeScheduledForMoveInLatestIteration() / OzoneConsts.GB, - sizeActuallyMovedInLatestIteration / OzoneConsts.GB, - metrics.getNumContainerMovesScheduledInLatestIteration(), - metrics.getNumContainerMovesCompletedInLatestIteration(), - metrics.getNumContainerMovesFailedInLatestIteration(), - metrics.getNumContainerMovesTimeoutInLatestIteration(), - findTargetStrategy.getSizeEnteringNodes() - .entrySet() - .stream() - .filter(Objects::nonNull) - .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) - .collect(Collectors.toMap( - entry -> entry.getKey().getUuid(), - entry -> entry.getValue() / OzoneConsts.GB - ) - ), - findSourceStrategy.getSizeLeavingNodes() - .entrySet() - .stream() - .filter(Objects::nonNull) - .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) - .collect( - Collectors.toMap( - entry -> entry.getKey().getUuid(), - entry -> entry.getValue() / OzoneConsts.GB - ) - ) + iterationDuration + ); + return new ContainerBalancerTaskIterationStatusInfo( + iterationInfo, + containerMoveInfo, + dataMoveInfo ); - List resultList = new ArrayList<>(iterationsStatistic); - resultList.add(currentIterationStatistic); - return resultList; + } + + private long getCurrentIterationDuration() { + if (currentIterationStarted == null) { + return ABSENCE_OF_DURATION; + } else { + return now().toEpochSecond() - currentIterationStarted.toEpochSecond(); + } } /** @@ -432,14 +521,7 @@ private boolean initializeIteration() { this.maxDatanodesRatioToInvolvePerIteration = config.getMaxDatanodesRatioToInvolvePerIteration(); this.maxSizeToMovePerIteration = config.getMaxSizeToMovePerIteration(); - if (config.getNetworkTopologyEnable()) { - findTargetStrategy = new FindTargetGreedyByNetworkTopology( - containerManager, placementPolicyValidateProxy, - nodeManager, networkTopology); - } else { - findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager, - placementPolicyValidateProxy, nodeManager); - } + this.excludeNodes = config.getExcludeNodes(); this.includeNodes = config.getIncludeNodes(); // include/exclude nodes from balancing according to configs @@ -690,7 +772,7 @@ private void checkIterationMoveResults() { moveSelectionToFutureMap.values(); if (!futures.isEmpty()) { CompletableFuture allFuturesResult = CompletableFuture.allOf( - futures.toArray(new CompletableFuture[futures.size()])); + futures.toArray(new CompletableFuture[0])); try { allFuturesResult.get(config.getMoveTimeout().toMillis(), TimeUnit.MILLISECONDS); @@ -707,26 +789,28 @@ private void checkIterationMoveResults() { } } - countDatanodesInvolvedPerIteration = - selectedSources.size() + selectedTargets.size(); - metrics.incrementNumDatanodesInvolvedInLatestIteration( - countDatanodesInvolvedPerIteration); - metrics.incrementNumContainerMovesScheduled( - metrics.getNumContainerMovesScheduledInLatestIteration()); - metrics.incrementNumContainerMovesCompleted( - metrics.getNumContainerMovesCompletedInLatestIteration()); - metrics.incrementNumContainerMovesTimeout( - metrics.getNumContainerMovesTimeoutInLatestIteration()); - 
metrics.incrementDataSizeMovedGBInLatestIteration( - sizeActuallyMovedInLatestIteration / OzoneConsts.GB); - metrics.incrementDataSizeMovedGB( - metrics.getDataSizeMovedGBInLatestIteration()); - metrics.incrementNumContainerMovesFailed( - metrics.getNumContainerMovesFailedInLatestIteration()); + countDatanodesInvolvedPerIteration = selectedSources.size() + selectedTargets.size(); + + metrics.incrementNumDatanodesInvolvedInLatestIteration(countDatanodesInvolvedPerIteration); + + metrics.incrementNumContainerMovesScheduled(metrics.getNumContainerMovesScheduledInLatestIteration()); + + metrics.incrementNumContainerMovesCompleted(metrics.getNumContainerMovesCompletedInLatestIteration()); + + metrics.incrementNumContainerMovesTimeout(metrics.getNumContainerMovesTimeoutInLatestIteration()); + + metrics.incrementDataSizeMovedGBInLatestIteration(sizeActuallyMovedInLatestIteration / OzoneConsts.GB); + + metrics.incrementDataSizeMovedInLatestIteration(sizeActuallyMovedInLatestIteration); + + metrics.incrementDataSizeMovedGB(metrics.getDataSizeMovedGBInLatestIteration()); + + metrics.incrementNumContainerMovesFailed(metrics.getNumContainerMovesFailedInLatestIteration()); + LOG.info("Iteration Summary. Number of Datanodes involved: {}. Size " + "moved: {} ({} Bytes). Number of Container moves completed: {}.", countDatanodesInvolvedPerIteration, - StringUtils.byteDesc(sizeActuallyMovedInLatestIteration), + byteDesc(sizeActuallyMovedInLatestIteration), sizeActuallyMovedInLatestIteration, metrics.getNumContainerMovesCompletedInLatestIteration()); } @@ -904,18 +988,8 @@ private boolean moveContainer(DatanodeDetails source, CompletableFuture future; try { ContainerInfo containerInfo = containerManager.getContainer(containerID); + future = moveManager.move(containerID, source, moveSelection.getTargetNode()); - /* - If LegacyReplicationManager is enabled, ReplicationManager will - redirect to it. Otherwise, use MoveManager. 
- */ - if (replicationManager.getConfig().isLegacyEnabled()) { - future = replicationManager - .move(containerID, source, moveSelection.getTargetNode()); - } else { - future = moveManager.move(containerID, source, - moveSelection.getTargetNode()); - } metrics.incrementNumContainerMovesScheduledInLatestIteration(1); future = future.whenComplete((result, ex) -> { @@ -954,7 +1028,7 @@ private boolean moveContainer(DatanodeDetails source, selectionCriteria.addToExcludeDueToFailContainers(moveSelection.getContainerID()); metrics.incrementNumContainerMovesFailedInLatestIteration(1); return false; - } catch (NodeNotFoundException | TimeoutException e) { + } catch (NodeNotFoundException e) { LOG.warn("Container move failed for container {}", containerID, e); metrics.incrementNumContainerMovesFailedInLatestIteration(1); return false; @@ -1145,6 +1219,7 @@ private void resetState() { this.sizeScheduledForMoveInLatestIteration = 0; this.sizeActuallyMovedInLatestIteration = 0; metrics.resetDataSizeMovedGBInLatestIteration(); + metrics.resetDataSizeMovedInLatestIteration(); metrics.resetNumContainerMovesScheduledInLatestIteration(); metrics.resetNumContainerMovesCompletedInLatestIteration(); metrics.resetNumContainerMovesTimeoutInLatestIteration(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTaskIterationStatusInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTaskIterationStatusInfo.java index 1d597b0ca27..a466d9fd474 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTaskIterationStatusInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTaskIterationStatusInfo.java @@ -18,86 +18,160 @@ package org.apache.hadoop.hdds.scm.container.balancer; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; + +import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; +import java.util.stream.Collectors; /** * Information about balancer task iteration. 
*/ public class ContainerBalancerTaskIterationStatusInfo { - private final Integer iterationNumber; - private final String iterationResult; - private final long sizeScheduledForMoveGB; - private final long dataSizeMovedGB; - private final long containerMovesScheduled; - private final long containerMovesCompleted; - private final long containerMovesFailed; - private final long containerMovesTimeout; - private final Map sizeEnteringNodesGB; - private final Map sizeLeavingNodesGB; - - @SuppressWarnings("checkstyle:ParameterNumber") + + private final IterationInfo iterationInfo; + private final ContainerMoveInfo containerMoveInfo; + private final DataMoveInfo dataMoveInfo; + public ContainerBalancerTaskIterationStatusInfo( - Integer iterationNumber, - String iterationResult, - long sizeScheduledForMoveGB, - long dataSizeMovedGB, - long containerMovesScheduled, - long containerMovesCompleted, - long containerMovesFailed, - long containerMovesTimeout, - Map sizeEnteringNodesGB, - Map sizeLeavingNodesGB) { - this.iterationNumber = iterationNumber; - this.iterationResult = iterationResult; - this.sizeScheduledForMoveGB = sizeScheduledForMoveGB; - this.dataSizeMovedGB = dataSizeMovedGB; - this.containerMovesScheduled = containerMovesScheduled; - this.containerMovesCompleted = containerMovesCompleted; - this.containerMovesFailed = containerMovesFailed; - this.containerMovesTimeout = containerMovesTimeout; - this.sizeEnteringNodesGB = sizeEnteringNodesGB; - this.sizeLeavingNodesGB = sizeLeavingNodesGB; + IterationInfo iterationInfo, + ContainerMoveInfo containerMoveInfo, + DataMoveInfo dataMoveInfo) { + this.iterationInfo = iterationInfo; + this.containerMoveInfo = containerMoveInfo; + this.dataMoveInfo = dataMoveInfo; } + /** + * Get the number of iterations. + * @return iteration number + */ public Integer getIterationNumber() { - return iterationNumber; + return iterationInfo.getIterationNumber(); } + /** + * Get the iteration result. + * @return iteration result + */ public String getIterationResult() { - return iterationResult; + return iterationInfo.getIterationResult(); } - public long getSizeScheduledForMoveGB() { - return sizeScheduledForMoveGB; + /** + * Get the size of the bytes that are scheduled to move in the iteration. + * @return size in bytes + */ + public long getSizeScheduledForMove() { + return dataMoveInfo.getSizeScheduledForMove(); } - public long getDataSizeMovedGB() { - return dataSizeMovedGB; + /** + * Get the size of the bytes that were moved in the iteration. + * @return size in bytes + */ + public long getDataSizeMoved() { + return dataMoveInfo.getDataSizeMoved(); } + /** + * Get the number of containers scheduled to move. + * @return number of containers scheduled to move + */ public long getContainerMovesScheduled() { - return containerMovesScheduled; + return containerMoveInfo.getContainerMovesScheduled(); } + /** + * Get the number of successfully moved containers. + * @return number of successfully moved containers + */ public long getContainerMovesCompleted() { - return containerMovesCompleted; + return containerMoveInfo.getContainerMovesCompleted(); } + /** + * Get the number of containers that were not moved successfully. + * @return number of unsuccessfully moved containers + */ public long getContainerMovesFailed() { - return containerMovesFailed; + return containerMoveInfo.getContainerMovesFailed(); } + /** + * Get the number of containers moved with a timeout. 
+ * @return number of moved with timeout containers + */ public long getContainerMovesTimeout() { - return containerMovesTimeout; + return containerMoveInfo.getContainerMovesTimeout(); + } + + /** + * Get a map of the node IDs and the corresponding data sizes moved to each node. + * @return nodeId to size entering from node map + */ + public Map getSizeEnteringNodes() { + return dataMoveInfo.getSizeEnteringNodes(); + } + + /** + * Get a map of the node IDs and the corresponding data sizes moved from each node. + * @return nodeId to size leaving from node map + */ + public Map getSizeLeavingNodes() { + return dataMoveInfo.getSizeLeavingNodes(); + } + + /** + * Get the iteration duration. + * @return iteration duration + */ + public Long getIterationDuration() { + return iterationInfo.getIterationDuration(); } - public Map getSizeEnteringNodesGB() { - return sizeEnteringNodesGB; + /** + * Converts an instance into the protobuf compatible object. + * @return proto representation + */ + public StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto toProto() { + return StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto.newBuilder() + .setIterationNumber(getIterationNumber()) + .setIterationResult(Optional.ofNullable(getIterationResult()).orElse("")) + .setIterationDuration(getIterationDuration()) + .setSizeScheduledForMove(getSizeScheduledForMove()) + .setDataSizeMoved(getDataSizeMoved()) + .setContainerMovesScheduled(getContainerMovesScheduled()) + .setContainerMovesCompleted(getContainerMovesCompleted()) + .setContainerMovesFailed(getContainerMovesFailed()) + .setContainerMovesTimeout(getContainerMovesTimeout()) + .addAllSizeEnteringNodes( + mapToProtoNodeTransferInfo(getSizeEnteringNodes()) + ) + .addAllSizeLeavingNodes( + mapToProtoNodeTransferInfo(getSizeLeavingNodes()) + ) + .build(); } - public Map getSizeLeavingNodesGB() { - return sizeLeavingNodesGB; + /** + * Converts an instance into the protobuf compatible object. + * @param nodes node id to node traffic size + * @return node transfer info proto representation + */ + private List mapToProtoNodeTransferInfo( + Map nodes + ) { + return nodes.entrySet() + .stream() + .map(entry -> StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() + .setUuid(entry.getKey().toString()) + .setDataVolume(entry.getValue()) + .build() + ) + .collect(Collectors.toList()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerMoveInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerMoveInfo.java new file mode 100644 index 00000000000..caed286480b --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerMoveInfo.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +/** + * Information about moving containers. + */ +public class ContainerMoveInfo { + private final long containerMovesScheduled; + private final long containerMovesCompleted; + private final long containerMovesFailed; + private final long containerMovesTimeout; + + public ContainerMoveInfo(long containerMovesScheduled, long containerMovesCompleted, long containerMovesFailed, + long containerMovesTimeout) { + this.containerMovesScheduled = containerMovesScheduled; + this.containerMovesCompleted = containerMovesCompleted; + this.containerMovesFailed = containerMovesFailed; + this.containerMovesTimeout = containerMovesTimeout; + } + + public ContainerMoveInfo(ContainerBalancerMetrics metrics) { + this.containerMovesScheduled = metrics.getNumContainerMovesScheduledInLatestIteration(); + this.containerMovesCompleted = metrics.getNumContainerMovesCompletedInLatestIteration(); + this.containerMovesFailed = metrics.getNumContainerMovesFailedInLatestIteration(); + this.containerMovesTimeout = metrics.getNumContainerMovesTimeoutInLatestIteration(); + } + + public long getContainerMovesScheduled() { + return containerMovesScheduled; + } + + public long getContainerMovesCompleted() { + return containerMovesCompleted; + } + + public long getContainerMovesFailed() { + return containerMovesFailed; + } + + public long getContainerMovesTimeout() { + return containerMovesTimeout; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/DataMoveInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/DataMoveInfo.java new file mode 100644 index 00000000000..cd97011768d --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/DataMoveInfo.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +import java.util.Map; +import java.util.UUID; + +/** + * Information about the process of moving data. + */ +public class DataMoveInfo { + private final long sizeScheduledForMove; + private final long dataSizeMoved; + private final Map sizeEnteringNodes; + private final Map sizeLeavingNodes; + + + public DataMoveInfo( + long sizeScheduledForMove, + long dataSizeMoved, + Map sizeEnteringNodes, + Map sizeLeavingNodes) { + this.sizeScheduledForMove = sizeScheduledForMove; + this.dataSizeMoved = dataSizeMoved; + this.sizeEnteringNodes = sizeEnteringNodes; + this.sizeLeavingNodes = sizeLeavingNodes; + } + + public long getSizeScheduledForMove() { + return sizeScheduledForMove; + } + + public long getDataSizeMoved() { + return dataSizeMoved; + } + + public Map getSizeEnteringNodes() { + return sizeEnteringNodes; + } + + public Map getSizeLeavingNodes() { + return sizeLeavingNodes; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java index 435cc9859a9..9773ae45f50 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java @@ -26,11 +26,11 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.PriorityQueue; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; /** * The selection criteria for selecting source datanodes , the containers of @@ -46,7 +46,7 @@ public class FindSourceGreedy implements FindSourceStrategy { private Double lowerLimit; FindSourceGreedy(NodeManager nodeManager) { - sizeLeavingNode = new HashMap<>(); + sizeLeavingNode = new ConcurrentHashMap<>(); potentialSources = new PriorityQueue<>((a, b) -> { double currentUsageOfA = a.calculateUtilization( -sizeLeavingNode.get(a.getDatanodeDetails())); @@ -206,4 +206,9 @@ public void reInitialize(List potentialDataNodes, public Map getSizeLeavingNodes() { return sizeLeavingNode; } + + @Override + public void clearSizeLeavingNodes() { + sizeLeavingNode.clear(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java index 9e429aaa21d..0043d8509b0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java @@ -87,5 +87,14 @@ void reInitialize(List potentialDataNodes, */ void resetPotentialSources(@Nonnull Collection sources); + /** + * Get a map of the node IDs and the corresponding data sizes moved from each node. 
+ * @return nodeId to size leaving from node map + */ Map getSizeLeavingNodes(); + + /** + * Clear the map of node IDs and their corresponding data sizes that were moved from each node. + */ + void clearSizeLeavingNodes(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java index 389ea6e5192..8959fc4ff23 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java @@ -70,5 +70,14 @@ void reInitialize(List potentialDataNodes, */ void resetPotentialTargets(@Nonnull Collection targets); + /** + * Get a map of the node IDs and the corresponding data sizes moved to each node. + * @return nodeId to size entering from node map + */ Map getSizeEnteringNodes(); + + /** + * Clear the map of node IDs and their corresponding data sizes that were moved to each node. + */ + void clearSizeEnteringNodes(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/IterationInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/IterationInfo.java new file mode 100644 index 00000000000..615848a097a --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/IterationInfo.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +/** + * Information about the iteration. + */ +public class IterationInfo { + + private final Integer iterationNumber; + private final String iterationResult; + private final Long iterationDuration; + + public IterationInfo(Integer iterationNumber, String iterationResult, long iterationDuration) { + this.iterationNumber = iterationNumber; + this.iterationResult = iterationResult; + this.iterationDuration = iterationDuration; + } + + public Integer getIterationNumber() { + return iterationNumber; + } + + public String getIterationResult() { + return iterationResult; + } + + public Long getIterationDuration() { + return iterationDuration; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 7fec06e7e06..1c2b5a3be39 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -346,8 +346,7 @@ protected List chooseDatanodesInternalLegacy( return chooseNodes(null, chosenNodes, mutableFavoredNodes, mutableUsedNodes, favorIndex, nodesRequired, mapSizeRequired); } else { - List mutableExcludedNodes = new ArrayList<>(); - mutableExcludedNodes.addAll(excludedNodes); + List mutableExcludedNodes = new ArrayList<>(excludedNodes); // choose node to meet replication requirement // case 1: one excluded node, choose one on the same rack as the excluded // node, choose others on different racks. 
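A minimal, illustrative sketch (not part of the patch itself) of how the new per-iteration status classes introduced above — IterationInfo, ContainerMoveInfo and DataMoveInfo wrapped by ContainerBalancerTaskIterationStatusInfo — fit together and convert to the wire format. Class, constructor and method names are taken from the diff; the Map<UUID, Long> typing of the node-traffic maps is an assumption inferred from the conversion code (keys come from getUuid(), values are byte counts), and the sample values are hypothetical.

// Sketch only: constructing a finished-iteration status object and serializing it.
import java.util.Collections;
import java.util.Map;
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancerTaskIterationStatusInfo;
import org.apache.hadoop.hdds.scm.container.balancer.ContainerMoveInfo;
import org.apache.hadoop.hdds.scm.container.balancer.DataMoveInfo;
import org.apache.hadoop.hdds.scm.container.balancer.IterationInfo;

public final class IterationStatusInfoSketch {
  public static void main(String[] args) {
    // One finished iteration: 4 moves scheduled, 3 completed, none failed or timed out.
    IterationInfo iteration = new IterationInfo(1, "ITERATION_COMPLETED", 120L);
    ContainerMoveInfo moves = new ContainerMoveInfo(4, 3, 0, 0);

    // Sizes are plain bytes in the new API (the *GB fields were replaced by byte-based ones).
    long threeGb = 3L * 1024 * 1024 * 1024;
    long fourGb = 4L * 1024 * 1024 * 1024;
    Map<UUID, Long> entering = Collections.singletonMap(UUID.randomUUID(), threeGb);
    Map<UUID, Long> leaving = Collections.singletonMap(UUID.randomUUID(), threeGb);
    DataMoveInfo data = new DataMoveInfo(fourGb, threeGb, entering, leaving);

    ContainerBalancerTaskIterationStatusInfo status =
        new ContainerBalancerTaskIterationStatusInfo(iteration, moves, data);

    // toProto() produces the protobuf form returned to clients querying balancer status.
    StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto proto =
        status.toProto();
    System.out.println(proto.getIterationNumber() + " " + proto.getDataSizeMoved());
  }
}

For an in-progress iteration the result name is null, and toProto() serializes it as an empty string via Optional.ofNullable(getIterationResult()).orElse("").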
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 094e535dcbd..3d113b3d301 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -42,8 +42,7 @@ public SCMNodeMetric(SCMNodeStat stat) { * @param capacity in bytes * @param used in bytes * @param remaining in bytes - * @param committed - * @paaram committed in bytes + * @param committed in bytes */ @VisibleForTesting public SCMNodeMetric(long capacity, long used, long remaining, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java index 0abe8f6ea34..fcfef7de6e6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java @@ -248,7 +248,6 @@ public void setOfflineIndexesOkAfterPending(boolean val) { /** * Returns true if a container has under-replication caused by offline * indexes, but it is corrected by a pending add. - * @return */ public boolean offlineIndexesOkAfterPending() { return offlineIndexesOkAfterPending; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java index 80ef8ab4c0d..8d864fbc4e0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java @@ -68,7 +68,7 @@ default boolean isHealthy() { || containerState == HddsProtos.LifeCycleState.QUASI_CLOSED) && getReplicas().stream() .filter(r -> r.getDatanodeDetails().getPersistedOpState() == IN_SERVICE) - .allMatch(r -> LegacyReplicationManager.compareState( + .allMatch(r -> ReplicationManager.compareState( containerState, r.getState())); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java index d1890bdf802..4eef0a8a744 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java @@ -116,7 +116,7 @@ public List getPendingOps(ContainerID containerID) { * Store a ContainerReplicaOp to add a replica for the given ContainerID. 
* @param containerID ContainerID for which to add a replica * @param target The target datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @param deadlineEpochMillis The time by which the replica should have been * added and reported by the datanode, or it will * be discarded. @@ -130,7 +130,7 @@ public void scheduleAddReplica(ContainerID containerID, * Store a ContainerReplicaOp to delete a replica for the given ContainerID. * @param containerID ContainerID for which to delete a replica * @param target The target datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @param deadlineEpochMillis The time by which the replica should have been * deleted and reported by the datanode, or it will * be discarded. @@ -145,7 +145,7 @@ public void scheduleDeleteReplica(ContainerID containerID, * been replicated successfully. * @param containerID ContainerID for which to complete the replication * @param target The target Datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @return True if a pending replica was found and removed, false otherwise. */ public boolean completeAddReplica(ContainerID containerID, @@ -167,7 +167,7 @@ public boolean completeAddReplica(ContainerID containerID, * been deleted successfully. * @param containerID ContainerID for which to complete the deletion * @param target The target Datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @return True if a pending replica was found and removed, false otherwise. */ public boolean completeDeleteReplica(ContainerID containerID, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java deleted file mode 100644 index f491e2bd6f5..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyRatisContainerReplicaCount.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; - -import java.util.List; -import java.util.Set; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; - -/** - * When HDDS-6447 was done to improve the LegacyReplicationManager, work on - * the new replication manager had already started. When this class was added, - * the LegacyReplicationManager needed separate handling for healthy and - * unhealthy container replicas, but the new replication manager did not yet - * have this functionality. This class is used by the - * LegacyReplicationManager to allow {@link RatisContainerReplicaCount} to - * function for both use cases. When the new replication manager is finished - * and LegacyReplicationManager is removed, this class should be deleted and - * all necessary functionality consolidated to - * {@link RatisContainerReplicaCount} - */ -public class LegacyRatisContainerReplicaCount extends - RatisContainerReplicaCount { - public LegacyRatisContainerReplicaCount(ContainerInfo container, - Set replicas, - int inFlightAdd, - int inFlightDelete, int replicationFactor, - int minHealthyForMaintenance) { - super(container, replicas, inFlightAdd, inFlightDelete, replicationFactor, - minHealthyForMaintenance); - } - - public LegacyRatisContainerReplicaCount(ContainerInfo container, - Set replicas, List pendingOps, - int minHealthyForMaintenance, boolean considerUnhealthy) { - super(container, replicas, pendingOps, minHealthyForMaintenance, - considerUnhealthy); - } - - @Override - protected int healthyReplicaCountAdapter() { - return -getMisMatchedReplicaCount(); - } - - /** - * For LegacyReplicationManager, unhealthy replicas are all replicas that - * don't match the container's state. For a CLOSED container with replicas - * {CLOSED, CLOSING, UNHEALTHY, OPEN}, unhealthy replica count is 3. 2 - * mismatches (CLOSING, OPEN) + 1 UNHEALTHY = 3. - */ - @Override - public int getUnhealthyReplicaCountAdapter() { - return getMisMatchedReplicaCount(); - } - - /** - * Checks if all replicas (except UNHEALTHY) on in-service nodes are in the - * same health state as the container. This is similar to what - * {@link ContainerReplicaCount#isHealthy()} does. The difference is in how - * both methods treat UNHEALTHY replicas. - *

    - * This method is the interface between the decommissioning flow and - * Replication Manager. Callers can use it to check whether replicas of a - * container are in the same state as the container before a datanode is - * taken offline. - *

    - * Note that this method's purpose is to only compare the replica state with - * the container state. It does not check if the container has sufficient - * number of replicas - that is the job of {@link ContainerReplicaCount - * #isSufficientlyReplicatedForOffline(DatanodeDetails, NodeManager)}. - * @return true if the container is healthy enough, which is determined by - * various checks - */ - @Override - public boolean isHealthyEnoughForOffline() { - long countInService = getReplicas().stream() - .filter(r -> r.getDatanodeDetails().getPersistedOpState() == IN_SERVICE) - .count(); - if (countInService == 0) { - /* - Having no in-service nodes is unexpected and SCM shouldn't allow this - to happen in the first place. Return false here just to be safe. - */ - return false; - } - - LifeCycleState containerState = getContainer().getState(); - return (containerState == LifeCycleState.CLOSED - || containerState == LifeCycleState.QUASI_CLOSED) - && getReplicas().stream() - .filter(r -> r.getDatanodeDetails().getPersistedOpState() == IN_SERVICE) - .filter(r -> r.getState() != - ContainerReplicaProto.State.UNHEALTHY) - .allMatch(r -> ReplicationManager.compareState( - containerState, r.getState())); - } - - /** - * For Legacy Replication Manager and Ratis Containers, this method checks - * if the container is sufficiently replicated. It also checks whether - * there are any UNHEALTHY replicas that need to be replicated. - * @param datanode Not used in this implementation - * @param nodeManager An instance of NodeManager, used to check the health - * status of a node - * @return true if the container is sufficiently replicated and there are - * no UNHEALTHY replicas that need to be replicated, false otherwise - */ - @Override - public boolean isSufficientlyReplicatedForOffline(DatanodeDetails datanode, - NodeManager nodeManager) { - return super.isSufficientlyReplicated() && - super.getVulnerableUnhealthyReplicas(dn -> { - try { - return nodeManager.getNodeStatus(dn); - } catch (NodeNotFoundException e) { - return null; - } - }).isEmpty(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java deleted file mode 100644 index 04862e0d317..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java +++ /dev/null @@ -1,2581 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.replication; - -import com.google.common.collect.ImmutableList; -import com.google.protobuf.Message; -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigType; -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; -import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; -import org.apache.hadoop.hdds.scm.PlacementPolicy; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; -import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState; -import org.apache.hadoop.hdds.scm.container.balancer.MoveManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler; -import org.apache.hadoop.hdds.scm.ha.SCMHAManager; -import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; -import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer; -import org.apache.hadoop.hdds.scm.metadata.Replicate; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodeStatus; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.ClientVersion; -import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.ratis.protocol.exceptions.NotLeaderException; -import org.apache.ratis.util.Preconditions; -import org.slf4j.Logger; 
-import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.lang.reflect.Proxy; -import java.time.Clock; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; -import static org.apache.hadoop.hdds.conf.ConfigTag.SCM; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; -import static org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType.MOVE; -import static org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaOp.PendingOpType.ADD; -import static org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaOp.PendingOpType.DELETE; - -/** - * Legacy Replication Manager (RM) is a legacy , which is used to process - * non-EC container, and hopefully to be replaced int the future. - */ -public class LegacyReplicationManager { - - public static final Logger LOG = - LoggerFactory.getLogger(LegacyReplicationManager.class); - - static class InflightMap { - private final Map> map - = new ConcurrentHashMap<>(); - private final InflightType type; - private final int sizeLimit; - private final AtomicInteger inflightCount = new AtomicInteger(); - - InflightMap(InflightType type, int sizeLimit) { - this.type = type; - this.sizeLimit = sizeLimit > 0 ? sizeLimit : Integer.MAX_VALUE; - } - - boolean isReplication() { - return type == InflightType.REPLICATION; - } - - private List get(ContainerID id) { - return map.get(id); - } - - boolean containsKey(ContainerID id) { - return map.containsKey(id); - } - - int inflightActionCount(ContainerID id) { - return Optional.ofNullable(map.get(id)).map(List::size).orElse(0); - } - - int containerCount() { - return map.size(); - } - - boolean isFull() { - return inflightCount.get() >= sizeLimit; - } - - void clear() { - map.clear(); - } - - void iterate(ContainerID id, Predicate processor) { - for (; ;) { - final List actions = get(id); - if (actions == null) { - return; - } - synchronized (actions) { - if (get(id) != actions) { - continue; //actions is changed, retry - } - for (Iterator i = actions.iterator(); i.hasNext();) { - final boolean remove = processor.test(i.next()); - if (remove) { - i.remove(); - inflightCount.decrementAndGet(); - } - } - map.computeIfPresent(id, - (k, v) -> v == actions && v.isEmpty() ? null : v); - return; - } - } - } - - boolean add(ContainerID id, InflightAction a) { - final int previous = inflightCount.getAndUpdate( - n -> n < sizeLimit ? 
n + 1 : n); - if (previous >= sizeLimit) { - return false; - } - for (; ;) { - final List actions = map.computeIfAbsent(id, - key -> new LinkedList<>()); - synchronized (actions) { - if (get(id) != actions) { - continue; //actions is changed, retry - } - final boolean added = actions.add(a); - if (!added) { - inflightCount.decrementAndGet(); - } - return added; - } - } - } - - List getDatanodeDetails(ContainerID id) { - for (; ;) { - final List actions = get(id); - if (actions == null) { - return Collections.emptyList(); - } - synchronized (actions) { - if (get(id) != actions) { - continue; //actions is changed, retry - } - return actions.stream() - .map(InflightAction::getDatanode) - .collect(Collectors.toList()); - } - } - } - } - - /** - * Reference to the ContainerManager. - */ - private final ContainerManager containerManager; - - /** - * PlacementPolicy which is used to identify where a container - * should be replicated. - */ - private final PlacementPolicy containerPlacement; - - /** - * EventPublisher to fire Replicate and Delete container events. - */ - private final EventPublisher eventPublisher; - - /** - * SCMContext from StorageContainerManager. - */ - private final SCMContext scmContext; - - /** - * Used to lookup the health of a nodes or the nodes operational state. - */ - private final NodeManager nodeManager; - - /** - * This is used for tracking container replication commands which are issued - * by ReplicationManager and not yet complete. - */ - private final InflightMap inflightReplication; - - /** - * This is used for tracking container deletion commands which are issued - * by ReplicationManager and not yet complete. - */ - private final InflightMap inflightDeletion; - - /** - * This is used for tracking container move commands - * which are not yet complete. - */ - private final Map> inflightMoveFuture; - - /** - * ReplicationManager specific configuration. - */ - private final ReplicationManagerConfiguration rmConf; - - /** - * Minimum number of replica in a healthy state for maintenance. - */ - private int minHealthyForMaintenance; - - private final Clock clock; - - /** - * Current container size as a bound for choosing datanodes with - * enough space for a replica. - */ - private long currentContainerSize; - - /** - * Replication progress related metrics. - */ - private ReplicationManagerMetrics metrics; - - /** - * scheduler move option. - */ - private final MoveScheduler moveScheduler; - - - /** - * Constructs ReplicationManager instance with the given configuration. 
- * - * @param conf OzoneConfiguration - * @param containerManager ContainerManager - * @param containerPlacement PlacementPolicy - * @param eventPublisher EventPublisher - */ - @SuppressWarnings("parameternumber") - public LegacyReplicationManager(final ConfigurationSource conf, - final ContainerManager containerManager, - final PlacementPolicy containerPlacement, - final EventPublisher eventPublisher, - final SCMContext scmContext, - final NodeManager nodeManager, - final SCMHAManager scmhaManager, - final Clock clock, - final Table moveTable) - throws IOException { - this.containerManager = containerManager; - this.containerPlacement = containerPlacement; - this.eventPublisher = eventPublisher; - this.scmContext = scmContext; - this.nodeManager = nodeManager; - this.rmConf = conf.getObject(ReplicationManagerConfiguration.class); - LegacyReplicationManagerConfiguration legacyConf = conf - .getObject(LegacyReplicationManagerConfiguration.class); - this.inflightReplication = new InflightMap(InflightType.REPLICATION, - legacyConf.getContainerInflightReplicationLimit()); - this.inflightDeletion = new InflightMap(InflightType.DELETION, - legacyConf.getContainerInflightDeletionLimit()); - this.inflightMoveFuture = new ConcurrentHashMap<>(); - this.minHealthyForMaintenance = rmConf.getMaintenanceReplicaMinimum(); - this.clock = clock; - - this.currentContainerSize = (long) conf.getStorageSize( - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.BYTES); - this.metrics = null; - - moveScheduler = new MoveSchedulerImpl.Builder() - .setDBTransactionBuffer(scmhaManager.getDBTransactionBuffer()) - .setRatisServer(scmhaManager.getRatisServer()) - .setMoveTable(moveTable).build(); - } - - - protected synchronized void clearInflightActions() { - inflightReplication.clear(); - inflightDeletion.clear(); - } - - protected synchronized void setMetrics(ReplicationManagerMetrics metrics) { - this.metrics = metrics; - } - - /** - * Process the given container. - * - * @param container ContainerInfo - */ - @SuppressWarnings("checkstyle:methodlength") - protected void processContainer(ContainerInfo container, - ReplicationManagerReport report) { - final ContainerID id = container.containerID(); - try { - // synchronize on the containerInfo object to solve container - // race conditions with ICR/FCR handlers - synchronized (container) { - final Set replicas = containerManager - .getContainerReplicas(id); - final LifeCycleState state = container.getState(); - - /* - * We don't take any action if the container is in OPEN state and - * the container is healthy. If the container is not healthy, i.e. - * the replicas are not in OPEN state, send CLOSE_CONTAINER command. - */ - if (state == LifeCycleState.OPEN) { - if (!isOpenContainerHealthy(container, replicas)) { - report.incrementAndSample( - HealthState.OPEN_UNHEALTHY, container.containerID()); - eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, id); - } - return; - } - - /* - * If the container is in CLOSING state, the replicas can either - * be in OPEN or in CLOSING state. In both of this cases - * we have to resend close container command to the datanodes. 
- */ - if (state == LifeCycleState.CLOSING) { - setHealthStateForClosing(replicas, container, report); - boolean foundHealthy = false; - for (ContainerReplica replica: replicas) { - if (replica.getState() != State.UNHEALTHY) { - foundHealthy = true; - sendCloseCommand( - container, replica.getDatanodeDetails(), false); - } - } - - /* - * Empty containers in CLOSING state should be CLOSED. - * - * These are containers that are allocated in SCM but never got - * created on Datanodes. Since these containers don't have any - * replica associated with them, they are stuck in CLOSING state - * forever as there is no replicas to CLOSE. - */ - if (replicas.isEmpty() && (container.getNumberOfKeys() == 0)) { - closeEmptyContainer(container); - return; - } - - if (!foundHealthy) { - /* If we get here, then this container has replicas and all are - UNHEALTHY. Move it from CLOSING to QUASI_CLOSED so RM can then try - to maintain replication factor number of replicas. - */ - containerManager.updateContainerState(container.containerID(), - HddsProtos.LifeCycleEvent.QUASI_CLOSE); - LOG.debug("Moved container {} from CLOSING to QUASI_CLOSED " + - "because it has only UNHEALTHY replicas: {}.", container, - replicas); - } - - return; - } - - /* - * If the container is in QUASI_CLOSED state, check and close the - * container if possible. - */ - if (state == LifeCycleState.QUASI_CLOSED) { - if (canForceCloseContainer(container, replicas)) { - forceCloseContainer(container, replicas); - return; - } else { - report.incrementAndSample(HealthState.QUASI_CLOSED_STUCK, - container.containerID()); - } - } - - if (container.getReplicationType() == HddsProtos.ReplicationType.EC) { - // TODO We do not support replicating EC containers as yet, so at this - // point, after handing the closing etc states, we just return. - // EC Support will be added later. - return; - } - - /* - * Before processing the container we have to reconcile the - * inflightReplication and inflightDeletion actions. - * - * We remove the entry from inflightReplication and inflightDeletion - * list, if the operation is completed or if it has timed out. - */ - updateInflightAction(container, inflightReplication, - action -> replicas.stream().anyMatch( - r -> r.getDatanodeDetails().equals(action.getDatanode())), - () -> metrics.incrReplicaCreateTimeoutTotal(), - action -> updateCompletedReplicationMetrics(container, action)); - - updateInflightAction(container, inflightDeletion, - action -> replicas.stream().noneMatch( - r -> r.getDatanodeDetails().equals(action.getDatanode())), - () -> metrics.incrReplicaDeleteTimeoutTotal(), - action -> updateCompletedDeletionMetrics(container, action)); - - /* - * If container is under deleting and all it's replicas are deleted, - * then make the container as CLEANED, - * or resend the delete replica command if needed. - */ - if (state == LifeCycleState.DELETING) { - handleContainerUnderDelete(container, replicas); - return; - } - - /** - * We don't need to take any action for a DELETE container - eventually - * it will be removed from SCM. - */ - if (state == LifeCycleState.DELETED) { - return; - } - - RatisContainerReplicaCount replicaSet = - getContainerReplicaCount(container, replicas); - ContainerPlacementStatus placementStatus = getPlacementStatus( - replicas, container.getReplicationConfig().getRequiredNodes()); - - /* - * We don't have to take any action if the container is healthy. 
- * - * According to ReplicationMonitor container is considered healthy if - * the container is either in QUASI_CLOSED or in CLOSED state and has - * exact number of replicas in the same state. - */ - if (isContainerEmpty(container, replicas)) { - report.incrementAndSample( - HealthState.EMPTY, container.containerID()); - /* - * If container is empty, schedule task to delete the container. - */ - deleteContainerReplicas(container, replicas); - return; - } - - // If the container is empty and has no replicas, it is possible it was - // a container which stuck in the closing state which never got any - // replicas created on the datanodes. In this case, we don't have enough - // information to delete the container, so we just log it as EMPTY, - // leaving it as CLOSED and return true, otherwise, it will end up - // marked as missing by the under replication handling. - if (replicas.isEmpty() - && container.getState() == LifeCycleState.CLOSED - && container.getNumberOfKeys() == 0) { - LOG.debug("Container {} appears empty and is closed, but cannot be " + - "deleted because it has no replicas. Marking as EMPTY.", - container); - report.incrementAndSample(HealthState.EMPTY, container.containerID()); - return; - } - - /* - * Check if the container is under replicated and take appropriate - * action. - */ - boolean sufficientlyReplicated = replicaSet.isSufficientlyReplicated(); - boolean placementSatisfied = placementStatus.isPolicySatisfied(); - ContainerID containerID = container.containerID(); - if (!placementStatus.isPolicySatisfied()) { - report.incrementAndSample(HealthState.MIS_REPLICATED, containerID); - } - if (!replicaSet.isHealthy()) { - report.incrementAndSample(HealthState.UNHEALTHY, containerID); - } - if (!sufficientlyReplicated || !placementSatisfied) { - // Replicate container if needed. - if (!inflightReplication.isFull() || !inflightDeletion.isFull()) { - if (replicaSet.isUnrecoverable()) { - // There are no healthy or unhealthy replicas. - report.incrementAndSample(HealthState.MISSING, containerID); - report.incrementAndSample(HealthState.UNDER_REPLICATED, - containerID); - } else { - if (replicaSet.getHealthyReplicaCount() == 0 && - replicaSet.getUnhealthyReplicaCount() != 0) { - handleAllReplicasUnhealthy(container, replicaSet, - placementStatus, report); - } else { - handleUnderReplicatedHealthy(container, - replicaSet, placementStatus, report); - } - } - } - return; - } - - /* - * A QUASI_CLOSED container may have some UNHEALTHY replicas with the - * same Sequence ID as the container. RM should try to maintain one - * copy of such replicas when there are no healthy replicas that - * match the container's Sequence ID. - */ - List vulnerableUnhealthy = - replicaSet.getVulnerableUnhealthyReplicas(dn -> { - try { - return nodeManager.getNodeStatus(dn); - } catch (NodeNotFoundException e) { - LOG.warn("Exception for datanode {} while getting vulnerable replicas for container {}, with all " + - "replicas {}.", dn, container, replicas, e); - return null; - } - }); - if (!vulnerableUnhealthy.isEmpty()) { - report.incrementAndSample(HealthState.UNDER_REPLICATED, - container.containerID()); - handleVulnerableUnhealthyReplicas(replicaSet, vulnerableUnhealthy); - return; - } - - /* - * Check if the container is over replicated and take appropriate - * action. 
- */ - if (replicaSet.getReplicas().size() > - container.getReplicationConfig().getRequiredNodes()) { - if (replicaSet.isHealthy()) { - handleOverReplicatedHealthy(container, replicaSet, report); - } else { - handleOverReplicatedExcessUnhealthy(container, replicaSet, report); - } - return; - } - - /* - * If we get here, the container is not over replicated or under - * replicated, but it may be "unhealthy", which means it has one or - * more replica which are not in the same state as the container itself. - */ - if (!replicaSet.isHealthy()) { - handleContainerWithUnhealthyReplica(container, replicaSet); - } - } - } catch (ContainerNotFoundException ex) { - LOG.warn("Missing container {}.", id); - } catch (Exception ex) { - LOG.warn("Process container {} error: ", id, ex); - } - } - - /** - * Sends a replicate command for each replica specified in - * vulnerableUnhealthy. - * @param replicaCount RatisContainerReplicaCount for this container - * @param vulnerableUnhealthy List of UNHEALTHY replicas that need to be - * replicated - */ - private void handleVulnerableUnhealthyReplicas( - RatisContainerReplicaCount replicaCount, - List vulnerableUnhealthy) { - ContainerInfo container = replicaCount.getContainer(); - LOG.debug("Handling vulnerable UNHEALTHY replicas {} for container {}.", - vulnerableUnhealthy, container); - int pendingAdds = getInflightAdd(container.containerID()); - if (pendingAdds >= vulnerableUnhealthy.size()) { - LOG.debug("There are {} pending adds for container {}, while " + - "the number of UNHEALTHY replicas is {}.", pendingAdds, - container.containerID(), vulnerableUnhealthy.size()); - return; - } - - /* - Since we're replicating UNHEALTHY replicas, it's possible that - replication keeps on failing. Shuffling gives other replicas a chance to be - replicated since there's a limit on inflight adds. - */ - Collections.shuffle(vulnerableUnhealthy); - replicateEachSource(container, vulnerableUnhealthy, - replicaCount.getReplicas()); - } - - private void updateCompletedReplicationMetrics(ContainerInfo container, - InflightAction action) { - metrics.incrReplicasCreatedTotal(); - metrics.incrReplicationBytesCompletedTotal(container.getUsedBytes()); - metrics.addReplicationTime(clock.millis() - action.getTime()); - } - - private void updateCompletedDeletionMetrics(ContainerInfo container, - InflightAction action) { - metrics.incrReplicasDeletedTotal(); - metrics.incrDeletionBytesCompletedTotal(container.getUsedBytes()); - metrics.addDeletionTime(clock.millis() - action.getTime()); - } - - /** - * Reconciles the InflightActions for a given container. 
- * - * @param container Container to update - * @param inflightActions inflightReplication (or) inflightDeletion - * @param filter filter to check if the operation is completed - * @param timeoutCounter update timeout metrics - * @param completedCounter update completed metrics - */ - private void updateInflightAction(final ContainerInfo container, - final InflightMap inflightActions, - final Predicate filter, - final Runnable timeoutCounter, - final Consumer completedCounter) { - final ContainerID id = container.containerID(); - final long deadline = clock.millis() - rmConf.getEventTimeout(); - inflightActions.iterate(id, a -> updateInflightAction( - container, a, filter, timeoutCounter, completedCounter, - deadline, inflightActions.isReplication())); - } - - private boolean updateInflightAction(final ContainerInfo container, - final InflightAction a, - final Predicate filter, - final Runnable timeoutCounter, - final Consumer completedCounter, - final long deadline, - final boolean isReplication) { - boolean remove = false; - try { - final NodeStatus status = nodeManager.getNodeStatus(a.getDatanode()); - final boolean isUnhealthy = status.getHealth() != NodeState.HEALTHY; - final boolean isCompleted = filter.test(a); - final boolean isTimeout = a.getTime() < deadline; - final boolean isNotInService = status.getOperationalState() != - NodeOperationalState.IN_SERVICE; - if (isCompleted || isUnhealthy || isTimeout || isNotInService) { - if (isTimeout) { - timeoutCounter.run(); - } else if (isCompleted) { - completedCounter.accept(a); - } - - updateMoveIfNeeded(isUnhealthy, isCompleted, isTimeout, - isNotInService, container, a.getDatanode(), isReplication); - remove = true; - } - } catch (NodeNotFoundException | ContainerNotFoundException e) { - // Should not happen, but if it does, just remove the action as the - // node somehow does not exist; - remove = true; - } catch (Exception e) { - LOG.error("Got exception while updating.", e); - } - return remove; - } - - /** - * update inflight move if needed. - * - * @param isUnhealthy is the datanode unhealthy - * @param isCompleted is the action completed - * @param isTimeout is the action timeout - * @param container Container to update - * @param dn datanode which is removed from the inflightActions - * @param isInflightReplication is inflightReplication? - */ - private void updateMoveIfNeeded(final boolean isUnhealthy, - final boolean isCompleted, final boolean isTimeout, - final boolean isNotInService, - final ContainerInfo container, final DatanodeDetails dn, - final boolean isInflightReplication) - throws SCMException { - // make sure inflightMove contains the container - ContainerID id = container.containerID(); - - // make sure the datanode , which is removed from inflightActions, - // is source or target datanode. - MoveDataNodePair kv = moveScheduler.getMoveDataNodePair(id); - if (kv == null) { - return; - } - final boolean isSource = kv.getSrc().equals(dn); - final boolean isTarget = kv.getTgt().equals(dn); - if (!isSource && !isTarget) { - return; - } - - /* - * there are some case: - ********************************************************** - * * InflightReplication * InflightDeletion * - ********************************************************** - *source removed* unexpected * expected * - ********************************************************** - *target removed* expected * unexpected * - ********************************************************** - * unexpected action may happen somehow. 
to make it deterministic, - * if unexpected action happens, we just fail the completableFuture. - */ - - if (isSource && isInflightReplication) { - //if RM is reinitialize, inflightMove will be restored, - //but inflightMoveFuture not. so there will be a case that - //container is in inflightMove, but not in inflightMoveFuture. - compleleteMoveFutureWithResult(id, - MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR); - LOG.info("Move failed because replication for container {} " + - "unexpectedly happened at the source {}, not the target {}.", - container, kv.getSrc().getUuidString(), kv.getTgt().getUuidString()); - moveScheduler.completeMove(id.getProtobuf()); - return; - } - - if (isTarget && !isInflightReplication) { - compleleteMoveFutureWithResult(id, - MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR); - LOG.info("Move failed because deletion for container {} unexpectedly " + - "happened at the target {}, not the source {}.", container, - kv.getTgt().getUuidString(), kv.getSrc().getUuidString()); - moveScheduler.completeMove(id.getProtobuf()); - return; - } - - if (!(isInflightReplication && isCompleted)) { - if (isInflightReplication) { - if (isUnhealthy) { - compleleteMoveFutureWithResult(id, - MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY); - } else if (isNotInService) { - compleleteMoveFutureWithResult(id, - MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE); - } else { - compleleteMoveFutureWithResult(id, - MoveManager.MoveResult.REPLICATION_FAIL_TIME_OUT); - } - } else { - if (isUnhealthy) { - compleleteMoveFutureWithResult(id, - MoveManager.MoveResult.DELETION_FAIL_NODE_UNHEALTHY); - } else if (isTimeout) { - compleleteMoveFutureWithResult(id, - MoveManager.MoveResult.DELETION_FAIL_TIME_OUT); - } else if (isNotInService) { - compleleteMoveFutureWithResult(id, - MoveManager.MoveResult.DELETION_FAIL_NODE_NOT_IN_SERVICE); - } else { - compleleteMoveFutureWithResult(id, MoveManager.MoveResult.COMPLETED); - } - } - moveScheduler.completeMove(id.getProtobuf()); - } else { - deleteSrcDnForMove(container, - containerManager.getContainerReplicas(id)); - } - } - - /** - * add a move action for a given container. - * - * @param cid Container to move - * @param src source datanode - * @param tgt target datanode - */ - public CompletableFuture move(ContainerID cid, - DatanodeDetails src, DatanodeDetails tgt) - throws ContainerNotFoundException, NodeNotFoundException { - return move(cid, new MoveDataNodePair(src, tgt)); - } - - /** - * add a move action for a given container. - * - * @param cid Container to move - * @param mp MoveDataNodePair which contains source and target datanodes - */ - private CompletableFuture move(ContainerID cid, - MoveDataNodePair mp) throws ContainerNotFoundException, - NodeNotFoundException { - CompletableFuture ret = new CompletableFuture<>(); - - if (!scmContext.isLeader()) { - ret.complete(MoveManager.MoveResult.FAIL_LEADER_NOT_READY); - return ret; - } - - /* - * make sure the flowing conditions are met: - * 1 the given two datanodes are in healthy state - * 2 the given container exists on the given source datanode - * 3 the given container does not exist on the given target datanode - * 4 the given container is in closed state - * 5 the giver container is not taking any inflight action - * 6 the given two datanodes are in IN_SERVICE state - * 7 {Existing replicas + Target_Dn - Source_Dn} satisfies - * the placement policy - * - * move is a combination of two steps : replication and deletion. 
- * if the conditions above are all met, then we take a conservative - * strategy here : replication can always be executed, but the execution - * of deletion always depends on placement policy - */ - - DatanodeDetails srcDn = mp.getSrc(); - DatanodeDetails targetDn = mp.getTgt(); - NodeStatus currentNodeStat = nodeManager.getNodeStatus(srcDn); - NodeState healthStat = currentNodeStat.getHealth(); - NodeOperationalState operationalState = - currentNodeStat.getOperationalState(); - if (healthStat != NodeState.HEALTHY) { - ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY); - LOG.info("Failing move for container {} because source {} is {}", cid, - srcDn.getUuidString(), healthStat.toString()); - return ret; - } - if (operationalState != NodeOperationalState.IN_SERVICE) { - ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE); - LOG.info("Failing move for container {} because source {} is {}", cid, - srcDn.getUuidString(), operationalState.toString()); - return ret; - } - - currentNodeStat = nodeManager.getNodeStatus(targetDn); - healthStat = currentNodeStat.getHealth(); - operationalState = currentNodeStat.getOperationalState(); - if (healthStat != NodeState.HEALTHY) { - ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY); - LOG.info("Failing move for container {} because target {} is {}", cid, - targetDn.getUuidString(), healthStat.toString()); - return ret; - } - if (operationalState != NodeOperationalState.IN_SERVICE) { - ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE); - LOG.info("Failing move for container {} because target {} is {}", cid, - targetDn.getUuidString(), operationalState.toString()); - return ret; - } - - // we need to synchronize on ContainerInfo, since it is - // shared by ICR/FCR handler and this.processContainer - // TODO: use a Read lock after introducing a RW lock into ContainerInfo - ContainerInfo cif = containerManager.getContainer(cid); - synchronized (cif) { - final Set currentReplicas = containerManager - .getContainerReplicas(cid); - final Set replicas = currentReplicas.stream() - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toSet()); - if (replicas.contains(targetDn)) { - ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_EXIST_IN_TARGET); - return ret; - } - if (!replicas.contains(srcDn)) { - ret.complete( - MoveManager.MoveResult.REPLICATION_FAIL_NOT_EXIST_IN_SOURCE); - return ret; - } - - /* - * the reason why the given container should not be taking any inflight - * action is that: if the given container is being replicated or deleted, - * the num of its replica is not deterministic, so move operation issued - * by balancer may cause a nondeterministic result, so we should drop - * this option for this time. - * */ - - if (inflightReplication.containsKey(cid)) { - ret.complete( - MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_REPLICATION); - return ret; - } - if (inflightDeletion.containsKey(cid)) { - ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_DELETION); - return ret; - } - - /* - * here, no need to see whether cid is in inflightMove, because - * these three map are all synchronized on ContainerInfo, if cid - * is in infligtMove , it must now being replicated or deleted, - * so it must be in inflightReplication or in infligthDeletion. - * thus, if we can not find cid in both of them , this cid must - * not be in inflightMove. 
- */ - - LifeCycleState currentContainerStat = cif.getState(); - if (currentContainerStat != LifeCycleState.CLOSED) { - ret.complete( - MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED); - return ret; - } - - // check whether {Existing replicas + Target_Dn - Source_Dn} - // satisfies current placement policy - if (!isPolicySatisfiedAfterMove(cif, srcDn, targetDn, - new ArrayList<>(currentReplicas))) { - ret.complete(MoveManager.MoveResult.REPLICATION_NOT_HEALTHY_AFTER_MOVE); - return ret; - } - - try { - moveScheduler.startMove(cid.getProtobuf(), - mp.getProtobufMessage(ClientVersion.CURRENT_VERSION)); - } catch (IOException e) { - LOG.warn("Exception while starting move for container {}", cid, e); - ret.complete(MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR); - return ret; - } - - inflightMoveFuture.putIfAbsent(cid, ret); - sendReplicateCommand(cif, targetDn, Collections.singletonList(srcDn)); - } - LOG.info("receive a move request about container {} , from {} to {}", - cid, srcDn.getUuid(), targetDn.getUuid()); - return ret; - } - - /** - * Returns whether {Existing replicas + Target_Dn - Source_Dn} - * satisfies current placement policy. - * @param cif Container Info of moved container - * @param srcDn DatanodeDetails of source data node - * @param targetDn DatanodeDetails of target data node - * @param replicas container replicas - * @return whether the placement policy is satisfied after move - */ - private boolean isPolicySatisfiedAfterMove(ContainerInfo cif, - DatanodeDetails srcDn, DatanodeDetails targetDn, - final List replicas) { - Set movedReplicas = new HashSet<>(replicas); - movedReplicas.removeIf(r -> r.getDatanodeDetails().equals(srcDn)); - movedReplicas.add(ContainerReplica.newBuilder() - .setDatanodeDetails(targetDn) - .setContainerID(cif.containerID()) - .setContainerState(State.CLOSED).build()); - ContainerPlacementStatus placementStatus = getPlacementStatus( - movedReplicas, cif.getReplicationConfig().getRequiredNodes()); - return placementStatus.isPolicySatisfied(); - } - - /** - * Returns the number replica which are pending creation for the given - * container ID. - * @param id The ContainerID for which to check the pending replica - * @return The number of inflight additions or zero if none - */ - private int getInflightAdd(final ContainerID id) { - return inflightReplication.inflightActionCount(id); - } - - /** - * Returns the number replica which are pending delete for the given - * container ID. - * @param id The ContainerID for which to check the pending replica - * @return The number of inflight deletes or zero if none - */ - private int getInflightDel(final ContainerID id) { - return inflightDeletion.inflightActionCount(id); - } - - /** - * Returns true if the container is empty and CLOSED. - * A container is deemed empty if its keyCount (num of blocks) is 0. The - * usedBytes counter is not checked here because usedBytes is not a - * accurate representation of the committed blocks. There could be orphaned - * chunks in the container which contribute to the usedBytes. 
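Editorial sketch (not part of the deleted file): the move(ContainerID, DatanodeDetails, DatanodeDetails) API above returns a CompletableFuture that completes with a MoveManager.MoveResult. A minimal caller-side sketch, assuming a handle named legacyReplicationManager and pre-resolved datanode details; variable names and error handling here are illustrative assumptions, not taken from this patch:

    try {
      CompletableFuture<MoveManager.MoveResult> moveFuture =
          legacyReplicationManager.move(containerID, sourceDn, targetDn);
      moveFuture.thenAccept(result -> {
        // COMPLETED is the success case; all other results indicate why the
        // move was rejected or failed (e.g. REPLICATION_FAIL_NODE_UNHEALTHY).
        if (result != MoveManager.MoveResult.COMPLETED) {
          LOG.warn("Move of container {} from {} to {} finished with {}",
              containerID, sourceDn, targetDn, result);
        }
      });
    } catch (ContainerNotFoundException | NodeNotFoundException e) {
      LOG.warn("Cannot schedule move for container {}", containerID, e);
    }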
- * - * @param container Container to check - * @param replicas Set of ContainerReplicas - * @return true if the container is empty, false otherwise - */ - private boolean isContainerEmpty(final ContainerInfo container, - final Set replicas) { - return container.getState() == LifeCycleState.CLOSED && - !replicas.isEmpty() && - replicas.stream().allMatch( - r -> r.getState() == State.CLOSED && r.isEmpty()); - } - - /** - * Given a ContainerID, lookup the ContainerInfo and then return a - * ContainerReplicaCount object for the container. - * @param containerID The ID of the container - * @return ContainerReplicaCount for the given container - * @throws ContainerNotFoundException - */ - public ContainerReplicaCount getContainerReplicaCount(ContainerID containerID) - throws ContainerNotFoundException { - ContainerInfo container = containerManager.getContainer(containerID); - return getContainerReplicaCount(container); - } - - /** - * Given a container, obtain the set of known replica for it, and return a - * ContainerReplicaCount object. This object will contain the set of replica - * as well as all information required to determine if the container is over - * or under replicated, including the delta of replica required to repair the - * over or under replication. - * - * @param container The container to create a ContainerReplicaCount for - * @return ContainerReplicaCount representing the replicated state of the - * container. - * @throws ContainerNotFoundException - */ - public ContainerReplicaCount getContainerReplicaCount(ContainerInfo container) - throws ContainerNotFoundException { - // TODO: using a RW lock for only read - synchronized (container) { - final Set replica = containerManager - .getContainerReplicas(container.containerID()); - return getReplicaCountOptionallyConsiderUnhealthy(container, replica); - } - } - - /** - * Given a container and its set of replicas, create and return a - * ContainerReplicaCount representing the container. - * - * @param container The container for which to construct a - * ContainerReplicaCount - * @param replica The set of existing replica for this container - * @return ContainerReplicaCount representing the current state of the - * container - */ - private RatisContainerReplicaCount getContainerReplicaCount( - ContainerInfo container, Set replica) { - return new LegacyRatisContainerReplicaCount( - container, - replica, - getInflightAdd(container.containerID()), - getInflightDel(container.containerID()), - container.getReplicationConfig().getRequiredNodes(), - minHealthyForMaintenance); - } - - private RatisContainerReplicaCount getReplicaCountOptionallyConsiderUnhealthy( - ContainerInfo container, Set replicas) { - LegacyRatisContainerReplicaCount withUnhealthy = - new LegacyRatisContainerReplicaCount(container, replicas, - getPendingOps(container.containerID()), minHealthyForMaintenance, - true); - if (withUnhealthy.getHealthyReplicaCount() == 0 && - withUnhealthy.getUnhealthyReplicaCount() > 0) { - // if the container has only UNHEALTHY replicas, return the correct - // RatisContainerReplicaCount object which can handle UNHEALTHY replicas - return withUnhealthy; - } - - return new LegacyRatisContainerReplicaCount( - container, - replicas, - getInflightAdd(container.containerID()), - getInflightDel(container.containerID()), - container.getReplicationConfig().getRequiredNodes(), - minHealthyForMaintenance); - } - - /** - * Returns true if more than 50% of the container replicas with unique - * originNodeId are in QUASI_CLOSED state. 
- * - * @param container Container to check - * @param replicas Set of ContainerReplicas - * @return true if we can force close the container, false otherwise - */ - private boolean canForceCloseContainer(final ContainerInfo container, - final Set replicas) { - Preconditions.assertTrue(container.getState() == - LifeCycleState.QUASI_CLOSED); - final int replicationFactor = - container.getReplicationConfig().getRequiredNodes(); - final long uniqueQuasiClosedReplicaCount = replicas.stream() - .filter(r -> r.getState() == State.QUASI_CLOSED) - .map(ContainerReplica::getOriginDatanodeId) - .distinct() - .count(); - return uniqueQuasiClosedReplicaCount > (replicationFactor / 2); - } - - /** - * Delete the container and its replicas. - * - * @param container ContainerInfo - * @param replicas Set of ContainerReplicas - */ - private void deleteContainerReplicas(final ContainerInfo container, - final Set replicas) throws IOException, - InvalidStateTransitionException { - Preconditions.assertTrue(container.getState() == - LifeCycleState.CLOSED); - - replicas.stream().forEach(rp -> { - Preconditions.assertTrue(rp.getState() == State.CLOSED); - Preconditions.assertTrue(rp.isEmpty()); - sendDeleteCommand(container, rp.getDatanodeDetails(), false); - }); - containerManager.updateContainerState(container.containerID(), - HddsProtos.LifeCycleEvent.DELETE); - LOG.debug("Deleting empty container replicas for {},", container); - } - - /** - * Handle the container which is under delete. - * - * @param container ContainerInfo - * @param replicas Set of ContainerReplicas - */ - private void handleContainerUnderDelete(final ContainerInfo container, - final Set replicas) throws IOException, - InvalidStateTransitionException { - if (replicas.size() == 0) { - containerManager.updateContainerState(container.containerID(), - HddsProtos.LifeCycleEvent.CLEANUP); - LOG.debug("Container {} state changes to DELETED", container); - } else { - // Check whether to resend the delete replica command - final List deletionInFlight - = inflightDeletion.getDatanodeDetails(container.containerID()); - Set filteredReplicas = replicas.stream().filter( - r -> !deletionInFlight.contains(r.getDatanodeDetails())) - .collect(Collectors.toSet()); - // Resend the delete command - if (filteredReplicas.size() > 0) { - filteredReplicas.stream().forEach(rp -> { - sendDeleteCommand(container, rp.getDatanodeDetails(), false); - }); - LOG.debug("Resend delete Container command for {}", container); - } - } - } - - /** - * Force close the container replica(s) with highest sequence Id. - * - *
- * Note: We should force close the container only if >50% (quorum) - * of replicas with unique originNodeId are in QUASI_CLOSED state. - *
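Worked example added for clarity (not part of the deleted file): the >50% quorum rule above is implemented in canForceCloseContainer as uniqueQuasiClosedReplicaCount > (replicationFactor / 2). For a Ratis THREE container this reads as:

    // replicationFactor = 3, so replicationFactor / 2 == 1 (integer division)
    // 1 unique QUASI_CLOSED origin  -> 1 > 1 is false: do not force close
    // 2 unique QUASI_CLOSED origins -> 2 > 1 is true:  force close allowed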
    - * - * @param container ContainerInfo - * @param replicas Set of ContainerReplicas - */ - private void forceCloseContainer(final ContainerInfo container, - final Set replicas) { - Preconditions.assertTrue(container.getState() == - LifeCycleState.QUASI_CLOSED); - - final List quasiClosedReplicas = replicas.stream() - .filter(r -> r.getState() == State.QUASI_CLOSED) - .collect(Collectors.toList()); - - final Long sequenceId = quasiClosedReplicas.stream() - .map(ContainerReplica::getSequenceId) - .max(Long::compare) - .orElse(-1L); - - LOG.info("Force closing container {} with BCSID {}," + - " which is in QUASI_CLOSED state.", - container.containerID(), sequenceId); - - quasiClosedReplicas.stream() - .filter(r -> sequenceId != -1L) - .filter(replica -> replica.getSequenceId().equals(sequenceId)) - .forEach(replica -> sendCloseCommand( - container, replica.getDatanodeDetails(), true)); - } - - /** - * If the given container is under replicated, identify a new set of - * datanode(s) to replicate the container using PlacementPolicy - * and send replicate container command to the identified datanode(s). - * - * @param container ContainerInfo - * @param replicaSet An instance of ContainerReplicaCount, containing the - * current replica count and inflight adds and deletes - */ - private void handleUnderReplicatedHealthy(final ContainerInfo container, - final RatisContainerReplicaCount replicaSet, - final ContainerPlacementStatus placementStatus, - ReplicationManagerReport report) { - LOG.debug("Handling under-replicated container: {}", container); - if (replicaSet.isSufficientlyReplicated() - && placementStatus.isPolicySatisfied()) { - LOG.info("The container {} with replicas {} is sufficiently " + - "replicated and is not mis-replicated", - container.getContainerID(), replicaSet); - return; - } - - List allReplicas = replicaSet.getReplicas(); - int numCloseCommandsSent = closeReplicasIfPossible(container, allReplicas); - int replicasNeeded = - replicaSet.additionalReplicaNeeded() - numCloseCommandsSent; - - if (replicasNeeded > 0) { - report.incrementAndSample(HealthState.UNDER_REPLICATED, - container.containerID()); - } - - State matchingReplicaState = State.CLOSED; - if (container.getState() == LifeCycleState.QUASI_CLOSED) { - // If we are replicating quasi closed replicas, they should have the - // same origin node ID and therefore the same BCSID. If they have - // different origin node IDs, then we have 2/3 containers and it should - // have been closed before replicating. - matchingReplicaState = State.QUASI_CLOSED; - } - List replicationSources = getReplicationSources(container, - replicaSet.getReplicas(), matchingReplicaState); - // This method will handle topology even if replicasNeeded <= 0. - try { - replicateAnyWithTopology(container, replicationSources, - placementStatus, replicasNeeded, replicaSet.getReplicas()); - } catch (SCMException e) { - if (e.getResult() - .equals(SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE) && - replicasNeeded > 0) { - /* - If we reach here, the container is under replicated but placement - policy could not find any target Datanodes to host new replicas. - We can try unblocking under replication handling by removing any - unhealthy replicas. This will free up those datanodes, so they can host - healthy replicas. - */ - deleteUnhealthyReplicaIfNeeded(container, replicaSet); - } - } - } - - /** - * Finds and deletes an unhealthy replica (UNHEALTHY or QUASI_CLOSED) under - * certain conditions. 
- */ - private void deleteUnhealthyReplicaIfNeeded(ContainerInfo container, - RatisContainerReplicaCount replicaCount) { - LOG.info("Finding an unhealthy replica to delete for container {} with " + - "replicas {} to unblock under replication handling.", container, - replicaCount.getReplicas()); - - Set replicas = new HashSet<>(replicaCount.getReplicas()); - ContainerReplica replica = ReplicationManagerUtil - .selectUnhealthyReplicaForDelete(container, replicas, - getInflightDel(container.containerID()), - (dnd) -> { - try { - return nodeManager.getNodeStatus(dnd); - } catch (NodeNotFoundException e) { - LOG.warn("Exception while finding an unhealthy replica to " + - "delete for container {}.", container, e); - return null; - } - }); - - if (replica == null) { - LOG.info( - "Could not find any unhealthy replica to delete when unblocking " + - "under replication handling for container {} with replicas {}.", - container, replicas); - } else { - sendDeleteCommand(container, replica.getDatanodeDetails(), true); - } - } - - /** - * If the given container is over replicated, identify the datanode(s) - * to delete the container and send delete container command to the - * identified datanode(s). This method ignores unhealthy containers. - * - * @param container ContainerInfo - * @param replicaSet An instance of ContainerReplicaCount, containing the - * current replica count and inflight adds and deletes - */ - private void handleOverReplicatedHealthy(final ContainerInfo container, - final RatisContainerReplicaCount replicaSet, - ReplicationManagerReport report) { - - final ContainerID id = container.containerID(); - final int replicationFactor = - container.getReplicationConfig().getRequiredNodes(); - int excess = replicaSet.additionalReplicaNeeded() * -1; - if (excess > 0) { - LOG.info("Container {} is over replicated. Expected replica count" + - " is {}, but found {}.", id, replicationFactor, - replicationFactor + excess); - - report.incrementAndSample(HealthState.OVER_REPLICATED, - container.containerID()); - - // The list of replicas that we can potentially delete to fix the over - // replicated state. This method is only concerned with healthy replicas. - final List deleteCandidates = - getHealthyDeletionCandidates(container, replicaSet.getReplicas()); - - if (container.getState() == LifeCycleState.CLOSED) { - // Container is closed, so all healthy replicas are equal. - // We can choose which ones to delete based on topology. - // TODO Legacy RM implementation can only handle topology when all - // container replicas are closed and equal. - deleteExcessWithTopology(excess, container, deleteCandidates); - } else { - // Container is not yet closed. Choose which healthy replicas to - // delete so that we do not lose any origin node IDs. - deleteExcessWithNonUniqueOriginNodeIDs(container, - replicaSet.getReplicas(), - deleteCandidates, excess); - } - } - } - - /** - * Processes replicas of the container when all replicas are unhealthy (in - * a state that does not match the container state). - * - * Unhealthy replicas will first be checked to see if they can be closed. - * If there are more unhealthy replicas than required, some may be deleted. - * If there are fewer unhealthy replicas than required, some may be - * replicated. 
- */ - private void handleAllReplicasUnhealthy(ContainerInfo container, - RatisContainerReplicaCount replicaSet, - ContainerPlacementStatus placementStatus, - ReplicationManagerReport report) { - - List replicas = replicaSet.getReplicas(); - - RatisContainerReplicaCount unhealthyReplicaSet = - new LegacyRatisContainerReplicaCount(container, - new HashSet<>(replicaSet.getReplicas()), - getPendingOps(container.containerID()), - minHealthyForMaintenance, - true); - - if (unhealthyReplicaSet.isUnderReplicated()) { - handleUnderReplicatedAllUnhealthy(container, replicas, - placementStatus, unhealthyReplicaSet.additionalReplicaNeeded(), - report); - } else if (unhealthyReplicaSet.isOverReplicated()) { - handleOverReplicatedAllUnhealthy(container, replicas, - unhealthyReplicaSet.getExcessRedundancy(true), report); - } else { - // We have the correct number of unhealthy replicas. See if any of them - // can be closed. - closeReplicasIfPossible(container, replicas); - } - } - - /** - * Transform the Legacy inflight operation in the pendingOps format. - * @param containerID The contaiuner to get the pending ops for. - * @return A list of pendingOp, or an empty list if none exist. - */ - private List getPendingOps(ContainerID containerID) { - List pendingOps = new ArrayList<>(); - List inflightActions = inflightReplication.get(containerID); - if (inflightActions != null) { - for (InflightAction a : inflightActions) { - pendingOps.add(new ContainerReplicaOp( - ADD, a.getDatanode(), 0, Long.MAX_VALUE)); - } - } - inflightActions = inflightDeletion.get(containerID); - if (inflightActions != null) { - for (InflightAction a : inflightActions) { - pendingOps.add(new ContainerReplicaOp( - DELETE, a.getDatanode(), 0, Long.MAX_VALUE)); - } - } - return pendingOps; - } - - /** - * Handles a container which has the correct number of healthy replicas, - * but an excess of unhealthy replicas. - * - * If the container is closed, the unhealthy replicas can be deleted. If the - * container is not yet closed, the unhealthy replicas with non-unique - * origin node IDs can be deleted. - */ - private void handleOverReplicatedExcessUnhealthy( - final ContainerInfo container, - final RatisContainerReplicaCount replicaSet, - ReplicationManagerReport report) { - // Note - ReplicationManager would reach here only if the - // following conditions are met: - // 1. Container is in either CLOSED or QUASI-CLOSED state - // 2. We have adequate healthy replicas with extra unhealthy - // replicas. - - List replicas = replicaSet.getReplicas(); - List unhealthyReplicas = - getUnhealthyDeletionCandidates(container, replicas); - - // Only unhealthy replicas which cannot be closed will remain eligible - // for deletion, since this method is deleting unhealthy containers only. - closeReplicasIfPossible(container, unhealthyReplicas); - if (!unhealthyReplicas.isEmpty()) { - int excessReplicaCount = replicas.size() - - container.getReplicationConfig().getRequiredNodes(); - boolean excessDeleted = false; - if (container.getState() == LifeCycleState.CLOSED) { - // The container is already closed. The unhealthy replicas are extras - // and unnecessary. - deleteExcess(container, unhealthyReplicas, excessReplicaCount); - excessDeleted = true; - } else { - // Container is not yet closed. - // We only need to save the unhealthy replicas if they - // represent unique origin node IDs. If recovering these replicas is - // possible in the future they could be used to close the container. 
- excessDeleted = deleteExcessWithNonUniqueOriginNodeIDs(container, - replicaSet.getReplicas(), unhealthyReplicas, excessReplicaCount); - } - - if (excessDeleted) { - LOG.info("Container {} has {} excess unhealthy replicas. Excess " + - "unhealthy replicas will be deleted.", - container.getContainerID(), unhealthyReplicas.size()); - report.incrementAndSample(HealthState.OVER_REPLICATED, - container.containerID()); - } - } - } - - /** - * This method handles container with unhealthy replica by over-replicating - * the healthy replica. Once the container becomes over-replicated, - * we delete the unhealthy replica in the next cycle of replication manager - * in handleOverReplicatedExcessUnhealthy method. - */ - private void handleContainerWithUnhealthyReplica( - final ContainerInfo container, - final RatisContainerReplicaCount replicaSet) { - /* - * When there is an Unhealthy or Quasi Closed replica with incorrect - * sequence id for a Closed container, it should be deleted and one of - * the healthy replica has to be re-replicated. - * - * We first do the re-replication and over replicate the container, - * in the next cycle of replication manager the excess unhealthy replica - * is deleted. - */ - - if (container.getState() == LifeCycleState.CLOSED) { - final List replicas = replicaSet.getReplicas(); - final List replicationSources = getReplicationSources( - container, replicaSet.getReplicas(), State.CLOSED); - if (replicationSources.isEmpty()) { - LOG.warn("No healthy CLOSED replica for replication."); - return; - } - final ContainerPlacementStatus placementStatus = getPlacementStatus( - new HashSet<>(replicationSources), - container.getReplicationConfig().getRequiredNodes()); - try { - replicateAnyWithTopology(container, replicationSources, - placementStatus, replicas.size() - replicationSources.size(), - replicas); - } catch (SCMException e) { - LOG.warn("Could not fix container {} with replicas {}.", container, - replicas, e); - } - } - } - - /** - * Returns the replicas from {@code replicas} that: - * - Do not have in flight deletions - * - Exist on healthy datanodes - * - Have a replica state matching one of {@code validReplicaStates}. If - * this parameter is empty, any replica state is valid. - */ - private List getReplicationSources(ContainerInfo container, - List replicas, State... validReplicaStates) { - final List deletionInFlight - = inflightDeletion.getDatanodeDetails(container.containerID()); - final Set validReplicaStateSet = Arrays.stream(validReplicaStates) - .collect(Collectors.toSet()); - return replicas.stream() - // Exclude stale and dead nodes. This is particularly important for - // maintenance nodes, as the replicas will remain present in the - // container manager, even when they go dead. - .filter(r -> getNodeStatus(r.getDatanodeDetails()).isHealthy() - && !deletionInFlight.contains(r.getDatanodeDetails()) - && (validReplicaStateSet.isEmpty() || - validReplicaStateSet.contains(r.getState()))) - .collect(Collectors.toList()); - } - - private List getHealthyDeletionCandidates( - ContainerInfo container, List replicas) { - return getDeletionCandidates(container, replicas, true); - } - - private List getUnhealthyDeletionCandidates( - ContainerInfo container, List replicas) { - return getDeletionCandidates(container, replicas, false); - } - - /** - * A replica is eligible for deletion if its datanode is healthy and - * IN_SERVICE. 
- */ - private List getDeletionCandidates(ContainerInfo container, - List replicas, boolean healthy) { - return replicas.stream() - .filter(r -> getNodeStatus(r.getDatanodeDetails()).isHealthy() - && compareState(container.getState(), r.getState()) == healthy - && r.getDatanodeDetails().getPersistedOpState() == IN_SERVICE) - .collect(Collectors.toList()); - } - - /** - * if the container is in inflightMove, handle move. - * This function assumes replication has been completed - * - * @param cif ContainerInfo - * @param replicaSet An Set of replicas, which may have excess replicas - */ - private void deleteSrcDnForMove(final ContainerInfo cif, - final Set replicaSet) - throws SCMException { - final ContainerID cid = cif.containerID(); - MoveDataNodePair movePair = moveScheduler.getMoveDataNodePair(cid); - if (movePair == null) { - return; - } - final DatanodeDetails srcDn = movePair.getSrc(); - ContainerReplicaCount replicaCount = - getContainerReplicaCount(cif, replicaSet); - - if (!replicaSet.stream() - .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))) { - // if the target is present but source disappears somehow, - // we can consider move is successful. - compleleteMoveFutureWithResult(cid, MoveManager.MoveResult.COMPLETED); - moveScheduler.completeMove(cid.getProtobuf()); - return; - } - - int replicationFactor = - cif.getReplicationConfig().getRequiredNodes(); - ContainerPlacementStatus currentCPS = - getPlacementStatus(replicaSet, replicationFactor); - Set newReplicaSet = new HashSet<>(replicaSet); - newReplicaSet.removeIf(r -> r.getDatanodeDetails().equals(srcDn)); - ContainerPlacementStatus newCPS = - getPlacementStatus(newReplicaSet, replicationFactor); - - if (replicaCount.isOverReplicated() && - isPlacementStatusActuallyEqual(currentCPS, newCPS)) { - sendDeleteCommand(cif, srcDn, true); - } else { - // if source and target datanode are both in the replicaset, - // but we can not delete source datanode for now (e.g., - // there is only 3 replicas or not policy-statisfied , etc.), - // we just complete the future without sending a delete command. - LOG.info("can not remove source replica after successfully " + - "replicated to target datanode"); - compleleteMoveFutureWithResult(cid, - MoveManager.MoveResult.DELETE_FAIL_POLICY); - moveScheduler.completeMove(cid.getProtobuf()); - } - } - - /** - * whether the given two ContainerPlacementStatus are actually equal. - * - * @param cps1 ContainerPlacementStatus - * @param cps2 ContainerPlacementStatus - */ - private boolean isPlacementStatusActuallyEqual( - ContainerPlacementStatus cps1, - ContainerPlacementStatus cps2) { - return (!cps1.isPolicySatisfied() && - cps1.actualPlacementCount() == cps2.actualPlacementCount()) || - cps1.isPolicySatisfied() && cps2.isPolicySatisfied(); - } - - /** - * Given a set of ContainerReplica, transform it to a list of DatanodeDetails - * and then check if the list meets the container placement policy. - * @param replicas List of containerReplica - * @param replicationFactor Expected Replication Factor of the containe - * @return ContainerPlacementStatus indicating if the policy is met or not - */ - private ContainerPlacementStatus getPlacementStatus( - Set replicas, int replicationFactor) { - List replicaDns = replicas.stream() - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toList()); - return containerPlacement.validateContainerPlacement( - replicaDns, replicationFactor); - } - - /** - * Sends close container command for the given container to the given - * datanode. 
- * - * @param container Container to be closed - * @param datanode The datanode on which the container - * has to be closed - * @param force Should be set to true if we want to close a - * QUASI_CLOSED container - */ - private void sendCloseCommand(final ContainerInfo container, - final DatanodeDetails datanode, - final boolean force) { - - ContainerID containerID = container.containerID(); - LOG.info("Sending close container command for container {}" + - " to datanode {}.", containerID, datanode); - CloseContainerCommand closeContainerCommand = - new CloseContainerCommand(container.getContainerID(), - container.getPipelineID(), force); - try { - closeContainerCommand.setTerm(scmContext.getTermOfLeader()); - } catch (NotLeaderException nle) { - LOG.warn("Skip sending close container command," - + " since current SCM is not leader.", nle); - return; - } - closeContainerCommand.setEncodedToken(getContainerToken(containerID)); - eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, - new CommandForDatanode<>(datanode.getUuid(), closeContainerCommand)); - } - - private String getContainerToken(ContainerID containerID) { - if (scmContext.getScm() instanceof StorageContainerManager) { - StorageContainerManager scm = - (StorageContainerManager) scmContext.getScm(); - return scm.getContainerTokenGenerator().generateEncodedToken(containerID); - } - return ""; // unit test - } - - private boolean addInflight(InflightType type, ContainerID id, - InflightAction action) { - final boolean added = getInflightMap(type).add(id, action); - if (!added) { - metrics.incrInflightSkipped(type); - } - return added; - } - - /** - * Sends replicate container command for the given container to the given - * datanode. - * - * @param container Container to be replicated - * @param target The destination datanode to replicate - * @param sources List of source nodes from where we can replicate - */ - private void sendReplicateCommand(final ContainerInfo container, - final DatanodeDetails target, - final List sources) { - - final ContainerID id = container.containerID(); - final long containerID = id.getId(); - final ReplicateContainerCommand replicateCommand = - ReplicateContainerCommand.fromSources(containerID, sources); - LOG.debug("Trying to send {} to {}", replicateCommand, target); - - final boolean sent = sendAndTrackDatanodeCommand(target, replicateCommand, - action -> addInflight(InflightType.REPLICATION, id, action)); - - if (sent) { - LOG.info("Sent {} to {}", replicateCommand, target); - metrics.incrReplicationCmdsSentTotal(); - metrics.incrReplicationBytesTotal(container.getUsedBytes()); - } - } - - /** - * Sends delete container command for the given container to the given - * datanode. 
- * - * @param container Container to be deleted - * @param datanode The datanode on which the replica should be deleted - * @param force Should be set to true to delete an OPEN replica - */ - private void sendDeleteCommand(final ContainerInfo container, - final DatanodeDetails datanode, - final boolean force) { - - LOG.info("Sending delete container command for container {}" + - " to datanode {}", container.containerID(), datanode); - - final ContainerID id = container.containerID(); - final DeleteContainerCommand deleteCommand = - new DeleteContainerCommand(id.getId(), force); - final boolean sent = sendAndTrackDatanodeCommand(datanode, deleteCommand, - action -> addInflight(InflightType.DELETION, id, action)); - - if (sent) { - metrics.incrDeletionCmdsSentTotal(); - metrics.incrDeletionBytesTotal(container.getUsedBytes()); - } - } - - /** - * Creates CommandForDatanode with the given SCMCommand and fires - * DATANODE_COMMAND event to event queue. - * - * Tracks the command using the given tracker. - * - * @param datanode Datanode to which the command has to be sent - * @param command SCMCommand to be sent - * @param tracker Tracker which tracks the inflight actions - * @param Type of SCMCommand - */ - private boolean sendAndTrackDatanodeCommand( - final DatanodeDetails datanode, - final SCMCommand command, - final Predicate tracker) { - try { - command.setTerm(scmContext.getTermOfLeader()); - } catch (NotLeaderException nle) { - LOG.warn("Skip sending datanode command," - + " since current SCM is not leader.", nle); - return false; - } - final boolean allowed = tracker.test( - new InflightAction(datanode, clock.millis())); - if (!allowed) { - return false; - } - final CommandForDatanode datanodeCommand = - new CommandForDatanode<>(datanode.getUuid(), command); - eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, datanodeCommand); - return true; - } - - /** - * Wrap the call to nodeManager.getNodeStatus, catching any - * NodeNotFoundException and instead throwing an IllegalStateException. - * @param dn The datanodeDetails to obtain the NodeStatus for - * @return NodeStatus corresponding to the given Datanode. - */ - private NodeStatus getNodeStatus(DatanodeDetails dn) { - try { - return nodeManager.getNodeStatus(dn); - } catch (NodeNotFoundException e) { - throw new IllegalStateException("Unable to find NodeStatus for " + dn, e); - } - } - - /** - * Compares the container state with the replica state. - * - * @param containerState ContainerState - * @param replicaState ReplicaState - * @return true if the state matches, false otherwise - */ - public static boolean compareState(final LifeCycleState containerState, - final State replicaState) { - switch (containerState) { - case OPEN: - return replicaState == State.OPEN; - case CLOSING: - return replicaState == State.CLOSING; - case QUASI_CLOSED: - return replicaState == State.QUASI_CLOSED; - case CLOSED: - return replicaState == State.CLOSED; - case DELETING: - return false; - case DELETED: - return false; - default: - return false; - } - } - - /** - * An open container is healthy if all its replicas are in the same state as - * the container. 
- * @param container The container to check - * @param replicas The replicas belonging to the container - * @return True if the container is healthy, false otherwise - */ - private boolean isOpenContainerHealthy( - ContainerInfo container, Set replicas) { - LifeCycleState state = container.getState(); - return replicas.stream() - .allMatch(r -> compareState(state, r.getState())); - } - - private void setHealthStateForClosing(Set replicas, - ContainerInfo container, - ReplicationManagerReport report) { - if (replicas.size() == 0) { - report.incrementAndSample(HealthState.MISSING, container.containerID()); - report.incrementAndSample(HealthState.UNDER_REPLICATED, - container.containerID()); - report.incrementAndSample(HealthState.MIS_REPLICATED, - container.containerID()); - } - } - - public boolean isContainerReplicatingOrDeleting(ContainerID containerID) { - return inflightReplication.containsKey(containerID) || - inflightDeletion.containsKey(containerID); - } - - /** - * Configuration used by the Replication Manager. - */ - @ConfigGroup(prefix = "hdds.scm.replication") - public static class LegacyReplicationManagerConfiguration { - - @Config(key = "container.inflight.replication.limit", - type = ConfigType.INT, - defaultValue = "0", // 0 means unlimited. - tags = {SCM, OZONE}, - description = "This property is used to limit" + - " the maximum number of inflight replication." - ) - private int containerInflightReplicationLimit = 0; - - @Config(key = "container.inflight.deletion.limit", - type = ConfigType.INT, - defaultValue = "0", // 0 means unlimited. - tags = {SCM, OZONE}, - description = "This property is used to limit" + - " the maximum number of inflight deletion." - ) - private int containerInflightDeletionLimit = 0; - - public void setContainerInflightReplicationLimit(int replicationLimit) { - this.containerInflightReplicationLimit = replicationLimit; - } - - public void setContainerInflightDeletionLimit(int deletionLimit) { - this.containerInflightDeletionLimit = deletionLimit; - } - - public int getContainerInflightReplicationLimit() { - return containerInflightReplicationLimit; - } - - public int getContainerInflightDeletionLimit() { - return containerInflightDeletionLimit; - } - - } - - protected void notifyStatusChanged() { - //now, as the current scm is leader and it`s state is up-to-date, - //we need to take some action about replicated inflight move options. - onLeaderReadyAndOutOfSafeMode(); - } - - private InflightMap getInflightMap(InflightType type) { - switch (type) { - case REPLICATION: return inflightReplication; - case DELETION: return inflightDeletion; - default: throw new IllegalStateException("Unexpected type " + type); - } - } - - int getInflightCount(InflightType type) { - return getInflightMap(type).containerCount(); - } - - DatanodeDetails getFirstDatanode(InflightType type, ContainerID id) { - return getInflightMap(type).get(id).get(0).getDatanode(); - } - - public Map> - getInflightMove() { - return inflightMoveFuture; - } - - /** - * make move option HA aware. - */ - public interface MoveScheduler { - /** - * completeMove a move action for a given container. - * - * @param contianerIDProto Container to which the move option is finished - */ - @Replicate - void completeMove(HddsProtos.ContainerID contianerIDProto) - throws SCMException; - - /** - * start a move action for a given container. 
- * - * @param contianerIDProto Container to move - * @param mp encapsulates the source and target datanode infos - */ - @Replicate - void startMove(HddsProtos.ContainerID contianerIDProto, - HddsProtos.MoveDataNodePairProto mp) - throws IOException; - - /** - * get the MoveDataNodePair of the giver container. - * - * @param cid Container to move - * @return null if cid is not found in MoveScheduler, - * or the corresponding MoveDataNodePair - */ - MoveDataNodePair getMoveDataNodePair(ContainerID cid); - - /** - * Reinitialize the MoveScheduler with DB if become leader. - */ - void reinitialize(Table moveTable) throws IOException; - - /** - * get all the inflight move info. - */ - Map getInflightMove(); - } - - /** - * @return the moveScheduler of RM - */ - public MoveScheduler getMoveScheduler() { - return moveScheduler; - } - - /** - * Ratis based MoveScheduler, db operations are stored in - * DBTransactionBuffer until a snapshot is taken. - */ - public static final class MoveSchedulerImpl implements MoveScheduler { - private Table moveTable; - private final DBTransactionBuffer transactionBuffer; - /** - * This is used for tracking container move commands - * which are not yet complete. - */ - private final Map inflightMove; - - private MoveSchedulerImpl(Table moveTable, - DBTransactionBuffer transactionBuffer) throws IOException { - this.moveTable = moveTable; - this.transactionBuffer = transactionBuffer; - this.inflightMove = new ConcurrentHashMap<>(); - initialize(); - } - - @Override - public void completeMove(HddsProtos.ContainerID contianerIDProto) { - ContainerID cid = null; - try { - cid = ContainerID.getFromProtobuf(contianerIDProto); - transactionBuffer.removeFromBuffer(moveTable, cid); - } catch (IOException e) { - LOG.warn("Exception while completing move {}", cid); - } - inflightMove.remove(cid); - } - - @Override - public void startMove(HddsProtos.ContainerID contianerIDProto, - HddsProtos.MoveDataNodePairProto mdnpp) - throws IOException { - ContainerID cid = null; - MoveDataNodePair mp = null; - try { - cid = ContainerID.getFromProtobuf(contianerIDProto); - mp = MoveDataNodePair.getFromProtobuf(mdnpp); - if (!inflightMove.containsKey(cid)) { - transactionBuffer.addToBuffer(moveTable, cid, mp); - inflightMove.putIfAbsent(cid, mp); - } - } catch (IOException e) { - LOG.warn("Exception while completing move {}", cid); - } - } - - @Override - public MoveDataNodePair getMoveDataNodePair(ContainerID cid) { - return inflightMove.get(cid); - } - - @Override - public void reinitialize(Table mt) throws IOException { - moveTable = mt; - inflightMove.clear(); - initialize(); - } - - private void initialize() throws IOException { - try (TableIterator> iterator = - moveTable.iterator()) { - - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - final ContainerID cid = kv.getKey(); - final MoveDataNodePair mp = kv.getValue(); - Preconditions.assertNotNull(cid, - "moved container id should not be null"); - Preconditions.assertNotNull(mp, - "MoveDataNodePair container id should not be null"); - inflightMove.put(cid, mp); - } - } - } - - @Override - public Map getInflightMove() { - return inflightMove; - } - - /** - * Builder for Ratis based MoveSchedule. 
- */ - public static class Builder { - private Table moveTable; - private DBTransactionBuffer transactionBuffer; - private SCMRatisServer ratisServer; - - public Builder setRatisServer(final SCMRatisServer scmRatisServer) { - ratisServer = scmRatisServer; - return this; - } - - public Builder setMoveTable( - final Table mt) { - moveTable = mt; - return this; - } - - public Builder setDBTransactionBuffer(DBTransactionBuffer trxBuffer) { - transactionBuffer = trxBuffer; - return this; - } - - public MoveScheduler build() throws IOException { - Preconditions.assertNotNull(moveTable, "moveTable is null"); - Preconditions.assertNotNull(transactionBuffer, - "transactionBuffer is null"); - - final MoveScheduler impl = - new MoveSchedulerImpl(moveTable, transactionBuffer); - - final SCMHAInvocationHandler invocationHandler - = new SCMHAInvocationHandler(MOVE, impl, ratisServer); - - return (MoveScheduler) Proxy.newProxyInstance( - SCMHAInvocationHandler.class.getClassLoader(), - new Class[]{MoveScheduler.class}, - invocationHandler); - } - } - } - - /** - * when scm become LeaderReady and out of safe mode, some actions - * should be taken. for now , it is only used for handle replicated - * infligtht move. - */ - private void onLeaderReadyAndOutOfSafeMode() { - List needToRemove = new LinkedList<>(); - moveScheduler.getInflightMove().forEach((k, v) -> { - Set replicas; - ContainerInfo cif; - try { - replicas = containerManager.getContainerReplicas(k); - cif = containerManager.getContainer(k); - } catch (ContainerNotFoundException e) { - needToRemove.add(k.getProtobuf()); - LOG.error("can not find container {} " + - "while processing replicated move", k); - return; - } - boolean isSrcExist = replicas.stream() - .anyMatch(r -> r.getDatanodeDetails().equals(v.getSrc())); - boolean isTgtExist = replicas.stream() - .anyMatch(r -> r.getDatanodeDetails().equals(v.getTgt())); - - if (isSrcExist) { - if (isTgtExist) { - //the former scm leader may or may not send the deletion command - //before reelection.here, we just try to send the command again. - try { - deleteSrcDnForMove(cif, replicas); - } catch (Exception ex) { - LOG.error("Exception while cleaning up excess replicas.", ex); - } - } else { - // resenting replication command is ok , no matter whether there is an - // on-going replication - sendReplicateCommand(cif, v.getTgt(), - Collections.singletonList(v.getSrc())); - } - } else { - // if container does not exist in src datanode, no matter it exists - // in target datanode, we can not take more actions to this option, - // so just remove it through ratis - needToRemove.add(k.getProtobuf()); - } - }); - - for (HddsProtos.ContainerID containerID : needToRemove) { - try { - moveScheduler.completeMove(containerID); - } catch (Exception ex) { - LOG.error("Exception while moving container.", ex); - } - } - } - - /** - * complete the CompletableFuture of the container in the given Map with - * the given MoveManager.MoveResult. - */ - private void compleleteMoveFutureWithResult(ContainerID cid, - MoveManager.MoveResult mr) { - if (inflightMoveFuture.containsKey(cid)) { - inflightMoveFuture.get(cid).complete(mr); - inflightMoveFuture.remove(cid); - } - } - - private int closeReplicasIfPossible(ContainerInfo container, - List replicas) { - // This method should not be used on open containers. 
- if (container.getState() == LifeCycleState.OPEN) { - return 0; - } - - int numCloseCmdsSent = 0; - Iterator iterator = replicas.iterator(); - while (iterator.hasNext()) { - final ContainerReplica replica = iterator.next(); - final State state = replica.getState(); - if (state == State.OPEN || state == State.CLOSING) { - sendCloseCommand(container, replica.getDatanodeDetails(), false); - numCloseCmdsSent++; - iterator.remove(); - } else if (state == State.QUASI_CLOSED && - container.getState() == LifeCycleState.CLOSED) { - // Send force close command if the BCSID matches - if (container.getSequenceId() == replica.getSequenceId()) { - sendCloseCommand(container, replica.getDatanodeDetails(), true); - numCloseCmdsSent++; - iterator.remove(); - } - } - } - - return numCloseCmdsSent; - } - - /* HELPER METHODS FOR UNHEALTHY OVER AND UNDER REPLICATED CONTAINERS */ - - /** - * Process a container with more replicas than required where all replicas - * are unhealthy. - * - * First try to close any replicas that are unhealthy due to pending - * closure. Replicas that can be closed will become healthy and will not be - * processed by this method. - * If the container is closed, delete replicas with lower BCSIDs first. - * If the container is not yet closed, delete replicas with origin node IDs - * already represented by other replicas. - */ - private void handleOverReplicatedAllUnhealthy(ContainerInfo container, - List replicas, int excess, - ReplicationManagerReport report) { - List deleteCandidates = - getUnhealthyDeletionCandidates(container, replicas); - - // Only unhealthy replicas which cannot be closed will remain eligible - // for deletion, since this method is deleting unhealthy containers only. - closeReplicasIfPossible(container, deleteCandidates); - if (deleteCandidates.isEmpty()) { - return; - } - - if (excess > 0) { - boolean excessDeleted = false; - if (container.getState() == LifeCycleState.CLOSED) { - // Prefer to delete unhealthy replicas with lower BCS IDs. - // If the replica became unhealthy after the container was closed but - // before the replica could be closed, it may have a smaller BCSID. - deleteExcessLowestBcsIDs(container, deleteCandidates, excess); - excessDeleted = true; - } else { - // Container is not yet closed. - // We only need to save the unhealthy replicas if they - // represent unique origin node IDs. If recovering these replicas is - // possible in the future they could be used to close the container. - // If all excess replicas are unique then it is possible none of them - // are deleted. - excessDeleted = deleteExcessWithNonUniqueOriginNodeIDs(container, - replicas, deleteCandidates, excess); - } - - if (excessDeleted) { - report.incrementAndSample(HealthState.OVER_REPLICATED, - container.containerID()); - int replicationFactor = container.getReplicationFactor().getNumber(); - LOG.info("Container {} has all unhealthy replicas and is over " + - "replicated. Expected replica count" + - " is {}, but found {}.", container.getContainerID(), - replicationFactor, replicationFactor + excess); - } - } - } - - /** - * Processes container replicas when all replicas are unhealthy and there - * are fewer than the required number of replicas. - * - * If any of these replicas unhealthy because they are pending closure and - * they can be closed, close them to create a healthy replica that can be - * replicated. - * If none of the replicas can be closed, use one of the unhealthy replicas - * to restore replica count while satisfying topology requirements. 
- */ - private void handleUnderReplicatedAllUnhealthy(ContainerInfo container, - List replicas, ContainerPlacementStatus placementStatus, - int additionalReplicasNeeded, ReplicationManagerReport report) { - - report.incrementAndSample(HealthState.UNDER_REPLICATED, - container.containerID()); - int numCloseCmdsSent = closeReplicasIfPossible(container, replicas); - // Only replicate unhealthy containers if none of the unhealthy replicas - // could be closed. If we sent a close command to an unhealthy replica, - // we should wait for that to complete and replicate it when it becomes - // healthy on a future iteration. - if (numCloseCmdsSent == 0) { - LOG.info("Container {} is under replicated missing {} replicas with all" + - " replicas unhealthy. Copying unhealthy replicas.", - container.getContainerID(), additionalReplicasNeeded); - // TODO Datanodes currently shuffle sources, so we cannot prioritize - // some replicas based on BCSID or origin node ID. - try { - replicateAnyWithTopology(container, - getReplicationSources(container, replicas), placementStatus, - additionalReplicasNeeded, replicas); - } catch (SCMException e) { - LOG.warn("Could not fix container {} with replicas {}.", container, - replicas, e); - } - } - } - - /* HELPER METHODS FOR ALL OVER AND UNDER REPLICATED CONTAINERS */ - - /** - * Deletes the first {@code excess} replicas from {@code deleteCandidates}. - * Replicas whose datanode operation state is not IN_SERVICE will be skipped. - */ - private void deleteExcess(ContainerInfo container, - List deleteCandidates, int excess) { - // Replica which are maintenance or decommissioned are not eligible to - // be removed, as they do not count toward over-replication and they - // also may not be available - deleteCandidates.removeIf(r -> - r.getDatanodeDetails().getPersistedOpState() != - NodeOperationalState.IN_SERVICE); - - deleteCandidates.stream().limit(excess).forEach(r -> - sendDeleteCommand(container, r.getDatanodeDetails(), true)); - } - - /** - * remove execess replicas if needed, replicationFactor and placement policy - * will be take into consideration. - * - * @param excess the excess number after subtracting replicationFactor - * @param container ContainerInfo - * @param eligibleReplicas An list of replicas, which may have excess replicas - */ - private void deleteExcessWithTopology(int excess, - final ContainerInfo container, - final List eligibleReplicas) { - // After removing all unhealthy replicas, if the container is still over - // replicated then we need to check if it is already mis-replicated. - // If it is, we do no harm by removing excess replicas. However, if it is - // not mis-replicated, then we can only remove replicas if they don't - // make the container become mis-replicated. - if (excess > 0) { - Set eligibleSet = new HashSet<>(eligibleReplicas); - final int replicationFactor = - container.getReplicationConfig().getRequiredNodes(); - ContainerPlacementStatus ps = - getPlacementStatus(eligibleSet, replicationFactor); - - for (ContainerReplica r : eligibleReplicas) { - if (excess <= 0) { - break; - } - // First remove the replica we are working on from the set, and then - // check if the set is now mis-replicated. - eligibleSet.remove(r); - ContainerPlacementStatus nowPS = - getPlacementStatus(eligibleSet, replicationFactor); - if (isPlacementStatusActuallyEqual(ps, nowPS)) { - // Remove the replica if the container was already unsatisfied - // and losing this replica keep actual placement count unchanged. 
- // OR if losing this replica still keep satisfied - sendDeleteCommand(container, r.getDatanodeDetails(), true); - excess -= 1; - continue; - } - // If we decided not to remove this replica, put it back into the set - eligibleSet.add(r); - } - if (excess > 0) { - LOG.info("The container {} is over replicated with {} excess " + - "replica. The excess replicas cannot be removed without " + - "violating the placement policy", container, excess); - } - } - } - - /** - * @param container The container to operate on. - * @param allReplicas All replicas, providing all unique origin node IDs to - * this method. - * @param deleteCandidates The subset of allReplicas that are eligible for - * deletion. - * @param excess The maximum number of replicas to delete. If all origin - * node IDs are unique, no replicas may be deleted. - * @return True if replicas could be deleted. False otherwise. - */ - private boolean deleteExcessWithNonUniqueOriginNodeIDs( - ContainerInfo container, - List allReplicas, - List deleteCandidates, int excess) { - // Remove delete candidates whose origin node ID is not already covered - // by an existing replica. - // TODO topology handling must be improved to make an optimal - // choice as to which replica to keep. - Set allReplicasSet = new HashSet<>(allReplicas); - List nonUniqueDeleteCandidates = - ReplicationManagerUtil.findNonUniqueDeleteCandidates(allReplicasSet, - deleteCandidates, (dnd) -> { - try { - return nodeManager.getNodeStatus(dnd); - } catch (NodeNotFoundException e) { - LOG.warn( - "Exception while finding excess unhealthy replicas to " + - "delete for container {} with replicas {}.", container, - allReplicas, e); - return null; - } - }); - - if (LOG.isDebugEnabled() && nonUniqueDeleteCandidates.size() < excess) { - LOG.debug("Unable to delete {} excess replicas of container {}. Only {}" + - " replicas can be deleted to preserve unique origin node IDs for " + - "this unclosed container.", excess, container.getContainerID(), - nonUniqueDeleteCandidates.size()); - } - - boolean deleteCandidatesPresent = !nonUniqueDeleteCandidates.isEmpty(); - if (deleteCandidatesPresent) { - deleteExcess(container, nonUniqueDeleteCandidates, excess); - } - return deleteCandidatesPresent; - } - - /** - * Delete {@code excess} replicas from {@code deleteCandidates}, deleting - * those with lowest BCSIDs first. - */ - private void deleteExcessLowestBcsIDs(ContainerInfo container, - List deleteCandidates, int excess) { - // Sort containers with lowest BCSID first. These will be the first ones - // deleted. - deleteCandidates.sort( - Comparator.comparingLong(ContainerReplica::getSequenceId)); - deleteExcess(container, deleteCandidates, excess); - } - - /** - * Choose {@code additionalReplicasNeeded} datanodes to make copies of some - * of the container replicas to restore replication factor or satisfy - * topology requirements. 
- */ - private void replicateAnyWithTopology(ContainerInfo container, - List sourceReplicas, - ContainerPlacementStatus placementStatus, int additionalReplicasNeeded, - List allReplicas) - throws SCMException { - try { - final ContainerID id = container.containerID(); - - final List sourceDNs = sourceReplicas.stream() - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toList()); - final List replicationInFlight - = inflightReplication.getDatanodeDetails(id); - if (sourceDNs.size() > 0) { - final int replicationFactor = container - .getReplicationConfig().getRequiredNodes(); - // Want to check if the container is mis-replicated after considering - // inflight add and delete. - // Create a new list from source (healthy replicas minus pending delete) - List targetReplicas = new ArrayList<>(sourceDNs); - // Then add any pending additions - targetReplicas.addAll(replicationInFlight); - final ContainerPlacementStatus inFlightplacementStatus = - containerPlacement.validateContainerPlacement( - targetReplicas, replicationFactor); - final int misRepDelta = inFlightplacementStatus.misReplicationCount(); - final int replicasNeeded = - Math.max(additionalReplicasNeeded, misRepDelta); - if (replicasNeeded <= 0) { - LOG.debug("Container {} meets replication requirement with " + - "inflight replicas", id); - return; - } - - final List excludeList = allReplicas.stream() - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toList()); - excludeList.addAll(replicationInFlight); - final List selectedDatanodes = - ReplicationManagerUtil.getTargetDatanodes(containerPlacement, - replicasNeeded, null, excludeList, currentContainerSize, - container); - - if (additionalReplicasNeeded > 0) { - LOG.info("Container {} is under replicated. Expected replica count" + - " is {}, but found {}.", id, replicationFactor, - replicationFactor - additionalReplicasNeeded); - } - int newMisRepDelta = misRepDelta; - if (misRepDelta > 0) { - LOG.info("Container: {}. {}", - id, placementStatus.misReplicatedReason()); - // Check if the new target nodes (original plus newly selected nodes) - // makes the placement policy valid. - targetReplicas.addAll(selectedDatanodes); - newMisRepDelta = containerPlacement.validateContainerPlacement( - targetReplicas, replicationFactor).misReplicationCount(); - } - if (additionalReplicasNeeded > 0 || newMisRepDelta < misRepDelta) { - // Only create new replicas if we are missing a replicas or - // the number of pending mis-replication has improved. No point in - // creating new replicas for mis-replicated containers unless it - // improves things. - for (DatanodeDetails datanode : selectedDatanodes) { - sendReplicateCommand(container, datanode, sourceDNs); - } - } else { - LOG.warn("Container {} is mis-replicated, requiring {} additional " + - "replicas. After selecting new nodes, mis-replication has" + - "not improved. No additional replicas will be scheduled", - id, misRepDelta); - } - } else { - LOG.warn("Cannot replicate container {}, no healthy datanodes with " + - "replica found.", - container.containerID()); - } - } catch (IllegalStateException ex) { - LOG.warn("Exception while replicating container {}.", - container.getContainerID(), ex); - } - } - - /** - * Replicates each of the ContainerReplica specified in sources to new - * Datanodes. Will not consider Datanodes hosting existing replicas and - * Datanodes pending adds as targets. Note that this method simply skips - * the replica if there's an exception. 
- * @param container Container whose replicas are specified as sources - * @param sources List containing replicas, each will be replicated - * @param allReplicas all existing replicas of this container - */ - private void replicateEachSource(ContainerInfo container, - List sources, List allReplicas) { - final List excludeList = allReplicas.stream() - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toList()); - - for (ContainerReplica replica : sources) { - // also exclude any DNs pending to receive a replica of this container - final List replicationInFlight - = inflightReplication.getDatanodeDetails(container.containerID()); - for (DatanodeDetails dn : replicationInFlight) { - if (!excludeList.contains(dn)) { - excludeList.add(dn); - } - } - - try { - final List target = - ReplicationManagerUtil.getTargetDatanodes(containerPlacement, - 1, null, excludeList, currentContainerSize, - container); - sendReplicateCommand(container, target.iterator().next(), - ImmutableList.of(replica.getDatanodeDetails())); - } catch (SCMException e) { - LOG.warn("Exception while trying to replicate {} of container {}.", - replica, container, e); - } - } - } - - private void closeEmptyContainer(ContainerInfo containerInfo) { - /* - * We should wait for sometime before moving the container to CLOSED state. - * This will give enough time for Datanodes to report the container, - * in cases where the container creation was successful on Datanodes. - * - * Should we have a separate configuration for this wait time? - * For now, we are using ReplicationManagerThread Interval * 5 as the wait - * time. - */ - - final Duration waitTime = rmConf.getInterval().multipliedBy(5); - final Instant closingTime = containerInfo.getStateEnterTime(); - - try { - if (clock.instant().isAfter(closingTime.plus(waitTime))) { - containerManager.updateContainerState(containerInfo.containerID(), - HddsProtos.LifeCycleEvent.CLOSE); - } - } catch (IOException | InvalidStateTransitionException e) { - LOG.error("Failed to CLOSE the container {}", - containerInfo.containerID(), e); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java index fe771fac6a4..cb2d906edba 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java @@ -186,9 +186,9 @@ private void countReplicas() { * For example, consider a CLOSED container with the following replicas: * {CLOSED, CLOSING, OPEN, UNHEALTHY} * In this case, healthy replica count equals 3. Calculation: - * 1 CLOSED -> 1 matching replica. - * 1 OPEN, 1 CLOSING -> 2 mismatched replicas. - * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy. + * 1 CLOSED -> 1 matching replica. + * 1 OPEN, 1 CLOSING -> 2 mismatched replicas. + * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy. * Total healthy replicas = 3 = 1 matching + 2 mismatched replicas */ public int getHealthyReplicaCount() { @@ -223,11 +223,7 @@ private int getAvailableReplicas() { /** * The new replication manager now does not consider replicas with - * UNHEALTHY state when counting sufficient replication. This method is - * overridden to ensure LegacyReplicationManager works as intended in - * HDDS-6447. 
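The counting rule quoted in the RatisContainerReplicaCount javadoc above (matching plus mismatched replicas, with UNHEALTHY excluded) can be shown with a tiny arithmetic sketch. The enum and loop below are simplified stand-ins, not the actual SCM classes.

import java.util.Arrays;
import java.util.List;

// Sketch of the healthy-replica rule: healthy = replicas matching the container
// state + replicas in a mismatched-but-recoverable state; UNHEALTHY is excluded.
public class HealthyReplicaCountSketch {

  enum ReplicaState { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY }

  static int healthyCount(ReplicaState containerState, List<ReplicaState> replicas) {
    int matching = 0;
    int mismatched = 0;
    for (ReplicaState r : replicas) {
      if (r == ReplicaState.UNHEALTHY) {
        continue;           // unhealthy replicas are never counted
      } else if (r == containerState) {
        matching++;         // e.g. a CLOSED replica of a CLOSED container
      } else {
        mismatched++;       // e.g. an OPEN or CLOSING replica of a CLOSED container
      }
    }
    return matching + mismatched;
  }

  public static void main(String[] args) {
    // The javadoc example: a CLOSED container with {CLOSED, CLOSING, OPEN, UNHEALTHY}
    List<ReplicaState> replicas = Arrays.asList(
        ReplicaState.CLOSED, ReplicaState.CLOSING, ReplicaState.OPEN, ReplicaState.UNHEALTHY);
    // Prints 3: 1 matching + 2 mismatched; the UNHEALTHY replica is not counted.
    System.out.println(healthyCount(ReplicaState.CLOSED, replicas));
  }
}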
- * See {@link LegacyRatisContainerReplicaCount}, which overrides this - * method, for details. + * UNHEALTHY state when counting sufficient replication. */ protected int healthyReplicaCountAdapter() { return 0; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index b43caabd8d8..d183c876e95 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; -import org.apache.hadoop.hdds.scm.container.balancer.MoveManager; import org.apache.hadoop.hdds.scm.container.replication.health.ECMisReplicationCheckHandler; import org.apache.hadoop.hdds.scm.container.replication.health.MismatchedReplicasHandler; import org.apache.hadoop.hdds.scm.container.replication.health.ClosedWithUnhealthyReplicasHandler; @@ -85,10 +84,8 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -155,13 +152,6 @@ public class ReplicationManager implements SCMService { */ private ReplicationManagerMetrics metrics; - - /** - * Legacy RM will hopefully be removed after completing refactor - * for now, it is used to process non-EC container. - */ - private final LegacyReplicationManager legacyReplicationManager; - /** * Set of nodes which have been excluded for replication commands due to the * number of commands queued on a datanode. 
This can be used when generating @@ -214,7 +204,6 @@ public class ReplicationManager implements SCMService { * @param scmContext The SCMContext instance * @param nodeManager The nodeManager instance * @param clock Clock object used to get the current time - * @param legacyReplicationManager The legacy ReplicationManager instance * @param replicaPendingOps The pendingOps instance */ @SuppressWarnings("parameternumber") @@ -226,7 +215,6 @@ public ReplicationManager(final ConfigurationSource conf, final SCMContext scmContext, final NodeManager nodeManager, final Clock clock, - final LegacyReplicationManager legacyReplicationManager, final ContainerReplicaPendingOps replicaPendingOps) throws IOException { this.containerManager = containerManager; @@ -243,7 +231,6 @@ public ReplicationManager(final ConfigurationSource conf, HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT, TimeUnit.MILLISECONDS); this.containerReplicaPendingOps = replicaPendingOps; - this.legacyReplicationManager = legacyReplicationManager; this.ecReplicationCheckHandler = new ECReplicationCheckHandler(); this.ecMisReplicationCheckHandler = new ECMisReplicationCheckHandler(ecContainerPlacement); @@ -296,9 +283,6 @@ public synchronized void start() { LOG.info("Starting Replication Monitor Thread."); running = true; metrics = ReplicationManagerMetrics.create(this); - if (rmConf.isLegacyEnabled()) { - legacyReplicationManager.setMetrics(metrics); - } containerReplicaPendingOps.setReplicationMetrics(metrics); startSubServices(); } else { @@ -330,9 +314,6 @@ public synchronized void stop() { underReplicatedProcessorThread.interrupt(); overReplicatedProcessorThread.interrupt(); running = false; - if (rmConf.isLegacyEnabled()) { - legacyReplicationManager.clearInflightActions(); - } metrics.unRegister(); replicationMonitor.interrupt(); } else { @@ -385,10 +366,6 @@ public synchronized void processAll() { break; } report.increment(c.getState()); - if (rmConf.isLegacyEnabled() && !isEC(c.getReplicationConfig())) { - legacyReplicationManager.processContainer(c, report); - continue; - } try { processContainer(c, newRepQueue, report); // TODO - send any commands contained in the health result @@ -955,12 +932,8 @@ public ContainerReplicaCount getContainerReplicaCount(ContainerID containerID) throws ContainerNotFoundException { ContainerInfo container = containerManager.getContainer(containerID); final boolean isEC = isEC(container.getReplicationConfig()); + return getContainerReplicaCount(container, isEC); - if (!isEC && rmConf.isLegacyEnabled()) { - return legacyReplicationManager.getContainerReplicaCount(container); - } else { - return getContainerReplicaCount(container, isEC); - } } /** @@ -1076,28 +1049,6 @@ ReplicationQueue getQueue() { @ConfigGroup(prefix = "hdds.scm.replication") public static class ReplicationManagerConfiguration extends ReconfigurableConfig { - /** - * True if LegacyReplicationManager should be used for RATIS containers. - */ - @Config(key = "enable.legacy", - type = ConfigType.BOOLEAN, - defaultValue = "false", - tags = {SCM, OZONE}, - description = - "If true, LegacyReplicationManager will handle RATIS containers " + - "while ReplicationManager will handle EC containers. If false, " + - "ReplicationManager will handle both RATIS and EC." - ) - private boolean enableLegacy; - - public boolean isLegacyEnabled() { - return enableLegacy; - } - - public void setEnableLegacy(boolean enableLegacy) { - this.enableLegacy = enableLegacy; - } - /** * The frequency in which ReplicationMonitor thread should run. 
*/ @@ -1414,11 +1365,6 @@ public void notifyStatusChanged() { containerReplicaPendingOps.clear(); serviceStatus = ServiceStatus.RUNNING; } - if (rmConf.isLegacyEnabled()) { - //now, as the current scm is leader and it`s state is up-to-date, - //we need to take some action about replicated inflight move options. - legacyReplicationManager.notifyStatusChanged(); - } } else { serviceStatus = ServiceStatus.PAUSING; } @@ -1456,46 +1402,8 @@ public Clock getClock() { return clock; } - /** - * following functions will be refactored in a separate jira. - */ - public CompletableFuture move( - ContainerID cid, DatanodeDetails src, DatanodeDetails tgt) - throws NodeNotFoundException, ContainerNotFoundException, - TimeoutException { - CompletableFuture ret = - new CompletableFuture<>(); - if (!isRunning()) { - ret.complete(MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR); - LOG.warn("Failing move because Replication Monitor thread's " + - "running state is {}", isRunning()); - return ret; - } - - return legacyReplicationManager.move(cid, src, tgt); - } - - public Map> - getInflightMove() { - return legacyReplicationManager.getInflightMove(); - } - - public LegacyReplicationManager.MoveScheduler getMoveScheduler() { - return legacyReplicationManager.getMoveScheduler(); - } - - @VisibleForTesting - public LegacyReplicationManager getLegacyReplicationManager() { - return legacyReplicationManager; - } - public boolean isContainerReplicatingOrDeleting(ContainerID containerID) { - if (rmConf.isLegacyEnabled()) { - return legacyReplicationManager - .isContainerReplicatingOrDeleting(containerID); - } else { - return !getPendingReplicationOps(containerID).isEmpty(); - } + return !getPendingReplicationOps(containerID).isEmpty(); } private ContainerReplicaCount getContainerReplicaCount( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java index eb75db9bd50..dc48ae22279 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java @@ -163,8 +163,6 @@ public final class ReplicationManagerMetrics implements MetricsSource { private final ReplicationManager replicationManager; - private final boolean legacyReplicationManager; - //EC Metrics @Metric("Number of EC Replication commands sent.") private MutableCounterLong ecReplicationCmdsSentTotal; @@ -231,7 +229,6 @@ public final class ReplicationManagerMetrics implements MetricsSource { public ReplicationManagerMetrics(ReplicationManager manager) { this.registry = new MetricsRegistry(METRICS_SOURCE_NAME); this.replicationManager = manager; - legacyReplicationManager = replicationManager.getConfig().isLegacyEnabled(); } public static ReplicationManagerMetrics create(ReplicationManager manager) { @@ -254,24 +251,10 @@ public void getMetrics(MetricsCollector collector, boolean all) { .addGauge(INFLIGHT_EC_REPLICATION, getEcReplication()) .addGauge(INFLIGHT_EC_DELETION, getEcDeletion()); - if (legacyReplicationManager) { - // For non-legacy RM, we don't need to expose these metrics as the timeout - // metrics below replace them. 
- builder - .addGauge(INFLIGHT_REPLICATION_SKIPPED, - getInflightReplicationSkipped()) - .addGauge(INFLIGHT_DELETION_SKIPPED, getInflightDeletionSkipped()) - // If not using Legacy RM, move manager should expose its own metrics - // and therefore we don't need IN_FLIGHT_MOVE here. - .addGauge(INFLIGHT_MOVE, getInflightMove()); - } - if (!legacyReplicationManager) { - builder - .addGauge(UNDER_REPLICATED_QUEUE, - replicationManager.getQueue().underReplicatedQueueSize()) + builder.addGauge(UNDER_REPLICATED_QUEUE, + replicationManager.getQueue().underReplicatedQueueSize()) .addGauge(OVER_REPLICATED_QUEUE, replicationManager.getQueue().overReplicatedQueueSize()); - } ReplicationManagerReport report = replicationManager.getContainerReport(); for (Map.Entry e : @@ -289,15 +272,6 @@ public void getMetrics(MetricsCollector collector, boolean all) { deletionCmdsSentTotal.snapshot(builder, all); replicasDeletedTotal.snapshot(builder, all); replicaDeleteTimeoutTotal.snapshot(builder, all); - if (legacyReplicationManager) { - // As things stand, the new RM does not track bytes sent / completed - replicationBytesTotal.snapshot(builder, all); - replicationBytesCompletedTotal.snapshot(builder, all); - deletionBytesTotal.snapshot(builder, all); - deletionBytesCompletedTotal.snapshot(builder, all); - replicationTime.snapshot(builder, all); - deletionTime.snapshot(builder, all); - } ecReplicationCmdsSentTotal.snapshot(builder, all); ecDeletionCmdsSentTotal.snapshot(builder, all); ecReplicasCreatedTotal.snapshot(builder, all); @@ -385,14 +359,9 @@ public void incrInflightSkipped(InflightType type) { } public long getInflightReplication() { - if (legacyReplicationManager) { - return replicationManager.getLegacyReplicationManager() - .getInflightCount(InflightType.REPLICATION); - } else { - return replicationManager.getContainerReplicaPendingOps() + return replicationManager.getContainerReplicaPendingOps() .getPendingOpCount(ContainerReplicaOp.PendingOpType.ADD, ReplicationType.RATIS); - } } public long getInflightReplicationSkipped() { @@ -400,24 +369,15 @@ public long getInflightReplicationSkipped() { } public long getInflightDeletion() { - if (legacyReplicationManager) { - return replicationManager.getLegacyReplicationManager() - .getInflightCount(InflightType.DELETION); - } else { - return replicationManager.getContainerReplicaPendingOps() - .getPendingOpCount(ContainerReplicaOp.PendingOpType.DELETE, + return replicationManager.getContainerReplicaPendingOps() + .getPendingOpCount(ContainerReplicaOp.PendingOpType.DELETE, ReplicationType.RATIS); - } } public long getInflightDeletionSkipped() { return this.inflightDeletionSkippedTotal.value(); } - public long getInflightMove() { - return replicationManager.getInflightMove().size(); - } - public long getReplicationCmdsSentTotal() { return this.replicationCmdsSentTotal.value(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java index a95c0d39945..f271b8a863c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java @@ -49,7 +49,6 @@ public interface HealthCheck { * returns false. This allows handlers to be chained together, and each will * be tried in turn until one succeeds. 
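The addNext contract described in the HealthCheck javadoc above (handlers chained together, each tried in turn until one succeeds) is a chain-of-responsibility pattern. The sketch below illustrates that shape only; the interface, class names, and string-based checks are invented and do not reflect the real HealthCheck API.

// Minimal chain-of-responsibility sketch of the "addNext" idea described above.
interface CheckSketch {
  // Returns true if this handler, or one further down the chain, handled the input.
  boolean handle(String input);

  // Appends a handler to the end of the chain; returns this so calls can be chained.
  CheckSketch addNext(CheckSketch next);
}

class SimpleCheck implements CheckSketch {
  private final String keyword;
  private CheckSketch next;

  SimpleCheck(String keyword) {
    this.keyword = keyword;
  }

  @Override
  public boolean handle(String input) {
    if (input.contains(keyword)) {
      return true;                               // this handler succeeded, stop here
    }
    return next != null && next.handle(input);   // otherwise try the next handler
  }

  @Override
  public CheckSketch addNext(CheckSketch nextCheck) {
    if (next == null) {
      next = nextCheck;
    } else {
      next.addNext(nextCheck);                   // walk to the tail of the chain
    }
    return this;
  }
}

class ChainDemo {
  public static void main(String[] args) {
    CheckSketch chain = new SimpleCheck("under-replicated")
        .addNext(new SimpleCheck("over-replicated"))
        .addNext(new SimpleCheck("mis-replicated"));
    System.out.println(chain.handle("container is over-replicated")); // true, second handler matches
    System.out.println(chain.handle("container is healthy"));         // false, no handler matches
  }
}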
* @param handler - * @return */ HealthCheck addNext(HealthCheck handler); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java index c6f15be5d2c..1289a0a21ff 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java @@ -37,10 +37,10 @@ * <p>
* Currently we manage the following attributes for a container. * <p>
- * 1. StateMap - LifeCycleState -> Set of ContainerIDs - * 2. TypeMap - ReplicationType -> Set of ContainerIDs - * 3. OwnerMap - OwnerNames -> Set of ContainerIDs - * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs + * 1. StateMap - LifeCycleState -&gt; Set of ContainerIDs + * 2. TypeMap - ReplicationType -&gt; Set of ContainerIDs + * 3. OwnerMap - OwnerNames -&gt; Set of ContainerIDs + * 4. FactorMap - ReplicationFactor -&gt; Set of ContainerIDs * <p>
    * This means that for a cluster size of 750 PB -- we will have around 150 * Million containers, if we assume 5GB average container size. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java index 2b2c032cff4..eb7bc6b3ebf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java @@ -64,6 +64,13 @@ public final class SCMEvents { NodeRegistrationContainerReport.class, "Node_Registration_Container_Report"); + /** + * Event generated on DataNode Registration Container Report. + */ + public static final TypedEvent + CONTAINER_REGISTRATION_REPORT = new TypedEvent<>( + NodeRegistrationContainerReport.class, "Container_Registration_Report"); + /** * ContainerReports are sent out by Datanodes. This report is received by * SCMDatanodeHeartbeatDispatcher and Container_Report Event is generated. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java index f0d78b23079..5eeb489f677 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java @@ -58,7 +58,7 @@ /** * Utilities for SCM HA security. */ -public final class HASecurityUtils { +public final class HASecurityUtils { private HASecurityUtils() { } @@ -150,7 +150,6 @@ public static CertificateServer initializeRootCertificateServer( * * @param conf * @param certificateClient - * @return */ public static GrpcTlsConfig createSCMRatisTLSConfig(SecurityConfig conf, CertificateClient certificateClient) throws IOException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java index bd4b56cd8c1..ec95ab66bf1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java @@ -18,8 +18,7 @@ package org.apache.hadoop.hdds.scm.ha; import java.io.IOException; - -import com.google.common.base.Preconditions; +import java.util.Objects; import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointRequestProto; import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointResponseProto; @@ -52,12 +51,11 @@ public class InterSCMGrpcService extends private final Table transactionInfoTable; InterSCMGrpcService(final StorageContainerManager scm) throws IOException { - Preconditions.checkNotNull(scm); + Objects.requireNonNull(scm, "scm"); this.scm = scm; this.transactionInfoTable = HAUtils.getTransactionInfoTable( - scm.getScmMetadataStore().getStore(), new SCMDBDefinition()); - provider = - new SCMDBCheckpointProvider(scm.getScmMetadataStore().getStore()); + scm.getScmMetadataStore().getStore(), SCMDBDefinition.get()); + this.provider = new SCMDBCheckpointProvider(scm.getScmMetadataStore().getStore()); } @Override @@ -67,7 +65,7 @@ public void download(CopyDBCheckpointRequestProto request, scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); TransactionInfo transactionInfo = 
transactionInfoTable.get(TRANSACTION_INFO_KEY); - Preconditions.checkNotNull(transactionInfo); + Objects.requireNonNull(transactionInfo, "transactionInfo"); SCMGrpcOutputStream outputStream = new SCMGrpcOutputStream(responseObserver, scm.getClusterId(), BUFFER_SIZE); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java index f1ee76a198e..cd0346d72f8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.ratis.statemachine.SnapshotInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; @@ -41,6 +43,8 @@ * operation in DB. */ public class SCMHADBTransactionBufferImpl implements SCMHADBTransactionBuffer { + + public static final Logger LOG = LoggerFactory.getLogger(SCMHADBTransactionBufferImpl.class); private final StorageContainerManager scm; private SCMMetadataStore metadataStore; private BatchOperation currentBatchOperation; @@ -107,6 +111,8 @@ public SnapshotInfo getLatestSnapshot() { @Override public void setLatestSnapshot(SnapshotInfo latestSnapshot) { + LOG.info("{}: Set latest Snapshot to {}", + scm.getScmHAManager().getRatisServer().getDivision().getId(), latestSnapshot); this.latestSnapshot.set(latestSnapshot); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index 03f6ae293b2..92a5140ff2a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -55,7 +55,6 @@ public interface SCMHAManager extends AutoCloseable { /** * Returns the DBTransactionBuffer as SCMHADBTransactionBuffer if its * valid. 
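The InterSCMGrpcService hunk above swaps Guava's Preconditions.checkNotNull for Objects.requireNonNull with a parameter name. A small sketch of that null-check style follows; the class and field names are illustrative only.

import java.util.Objects;

// Illustrates Objects.requireNonNull(value, name): the message names the offending
// argument and ends up in the NullPointerException text, which aids debugging.
class RequireNonNullSketch {
  private final String clusterId;

  RequireNonNullSketch(String clusterId) {
    // Throws NullPointerException("clusterId") if the argument is null.
    this.clusterId = Objects.requireNonNull(clusterId, "clusterId");
  }

  public static void main(String[] args) {
    new RequireNonNullSketch("cluster-1");             // accepted
    try {
      new RequireNonNullSketch(null);                  // rejected with a named NPE
    } catch (NullPointerException e) {
      System.out.println("rejected: " + e.getMessage()); // prints "rejected: clusterId"
    }
  }
}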
- * @return */ SCMHADBTransactionBuffer asSCMHADBTransactionBuffer(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index fc3c1548ba1..048f5fc4f53 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -72,6 +72,7 @@ public class SCMHAManagerImpl implements SCMHAManager { private final SCMRatisServer ratisServer; private final ConfigurationSource conf; + private final OzoneConfiguration ozoneConf; private final SecurityConfig securityConfig; private final DBTransactionBuffer transactionBuffer; private final SCMSnapshotProvider scmSnapshotProvider; @@ -89,6 +90,7 @@ public SCMHAManagerImpl(final ConfigurationSource conf, final SecurityConfig securityConfig, final StorageContainerManager scm) throws IOException { this.conf = conf; + this.ozoneConf = OzoneConfiguration.of(conf); this.securityConfig = securityConfig; this.scm = scm; this.exitManager = new ExitManager(); @@ -128,7 +130,7 @@ public void start() throws IOException { // It will first try to add itself to existing ring final SCMNodeDetails nodeDetails = scm.getSCMHANodeDetails().getLocalNodeDetails(); - final boolean success = HAUtils.addSCM(OzoneConfiguration.of(conf), + final boolean success = HAUtils.addSCM(ozoneConf, new AddSCMRequest.Builder().setClusterId(scm.getClusterId()) .setScmId(scm.getScmId()) .setRatisAddr(nodeDetails @@ -221,17 +223,18 @@ public List getSecretKeysFromLeader(String leaderID) } } + private TransactionInfo getTransactionInfoFromCheckpoint(Path checkpointLocation) throws IOException { + return HAUtils.getTrxnInfoFromCheckpoint( + ozoneConf, checkpointLocation, SCMDBDefinition.get()); + } + @Override public TermIndex verifyCheckpointFromLeader(String leaderId, DBCheckpoint checkpoint) { try { Path checkpointLocation = checkpoint.getCheckpointLocation(); - TransactionInfo checkpointTxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(OzoneConfiguration.of(conf), - checkpointLocation, new SCMDBDefinition()); - - LOG.info("Installing checkpoint with SCMTransactionInfo {}", - checkpointTxnInfo); + final TransactionInfo checkpointTxnInfo = getTransactionInfoFromCheckpoint(checkpointLocation); + LOG.info("{}: Verify checkpoint {} from leader {}", scm.getScmId(), checkpointTxnInfo, leaderId); TermIndex termIndex = getRatisServer().getSCMStateMachine().getLastAppliedTermIndex(); @@ -281,12 +284,9 @@ public TermIndex installCheckpoint(DBCheckpoint dbCheckpoint) throws Exception { Path checkpointLocation = dbCheckpoint.getCheckpointLocation(); - TransactionInfo checkpointTrxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(OzoneConfiguration.of(conf), - checkpointLocation, new SCMDBDefinition()); + final TransactionInfo checkpointTrxnInfo = getTransactionInfoFromCheckpoint(checkpointLocation); - LOG.info("Installing checkpoint with SCMTransactionInfo {}", - checkpointTrxnInfo); + LOG.info("{}: Install checkpoint {}", scm.getScmId(), checkpointTrxnInfo); return installCheckpoint(checkpointLocation, checkpointTrxnInfo); } @@ -457,14 +457,12 @@ public void startServices() throws IOException { // TODO: Fix the metrics ?? 
final SCMMetadataStore metadataStore = scm.getScmMetadataStore(); - metadataStore.start(OzoneConfiguration.of(conf)); + metadataStore.start(ozoneConf); scm.getSequenceIdGen().reinitialize(metadataStore.getSequenceIdTable()); scm.getPipelineManager().reinitialize(metadataStore.getPipelineTable()); scm.getContainerManager().reinitialize(metadataStore.getContainerTable()); scm.getScmBlockManager().getDeletedBlockLog().reinitialize( metadataStore.getDeletedBlocksTXTable()); - scm.getReplicationManager().getMoveScheduler() - .reinitialize(metadataStore.getMoveTable()); scm.getStatefulServiceStateManager().reinitialize( metadataStore.getStatefulServiceConfigTable()); if (OzoneSecurityUtil.isSecurityEnabled(conf)) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java index b9539684ed0..85664dd232f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java @@ -23,6 +23,7 @@ import java.util.EnumMap; import java.util.List; import java.util.Map; +import java.util.UUID; import com.google.common.base.Preconditions; import com.google.protobuf.InvalidProtocolBufferException; @@ -170,6 +171,8 @@ private class RatisServerStub implements SCMRatisServer { private Map handlers = new EnumMap<>(RequestType.class); + private RaftPeerId leaderId = RaftPeerId.valueOf(UUID.randomUUID().toString()); + @Override public void start() { } @@ -283,5 +286,10 @@ public boolean removeSCM(RemoveSCMRequest request) throws IOException { public GrpcTlsConfig getGrpcTlsConfig() { return null; } + + @Override + public RaftPeerId getLeaderId() { + return leaderId; + } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java index a786bd2944f..4e883b27a7d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.RemoveSCMRequest; import org.apache.ratis.grpc.GrpcTlsConfig; +import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; @@ -68,4 +69,6 @@ SCMRatisResponse submitRequest(SCMRatisRequest request) GrpcTlsConfig getGrpcTlsConfig(); + RaftPeerId getLeaderId(); + } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java index 70dffba27ec..0383bf18095 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java @@ -29,6 +29,7 @@ import java.util.concurrent.atomic.AtomicLong; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import jakarta.annotation.Nullable; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -147,6 +148,16 @@ public GrpcTlsConfig getGrpcTlsConfig() { 
return grpcTlsConfig; } + @Override + @Nullable + public RaftPeerId getLeaderId() { + RaftPeer raftLeaderPeer = getLeader(); + if (raftLeaderPeer != null) { + return raftLeaderPeer.getId(); + } + return null; + } + private static void waitForLeaderToBeReady(RaftServer server, OzoneConfiguration conf, RaftGroup group) throws IOException { boolean ready; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index 1128accd2ff..5805fe67e49 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -137,6 +137,7 @@ public void initialize(RaftServer server, RaftGroupId id, getLifeCycle().startAndTransition(() -> { super.initialize(server, id, raftStorage); storage.init(raftStorage); + LOG.info("{}: initialize {}", server.getId(), id); }); } @@ -149,6 +150,9 @@ public CompletableFuture applyTransaction( final SCMRatisRequest request = SCMRatisRequest.decode( Message.valueOf(trx.getStateMachineLogEntry().getLogData())); + if (LOG.isDebugEnabled()) { + LOG.debug("{}: applyTransaction {}", getId(), TermIndex.valueOf(trx.getLogEntry())); + } try { applyTransactionFuture.complete(process(request)); } catch (SCMException ex) { @@ -389,6 +393,7 @@ public void notifyConfigurationChanged(long term, long index, @Override public void pause() { final LifeCycle lc = getLifeCycle(); + LOG.info("{}: Try to pause from current LifeCycle state {}", getId(), lc); if (lc.getCurrentState() != LifeCycle.State.NEW) { lc.transition(LifeCycle.State.PAUSING); lc.transition(LifeCycle.State.PAUSED); @@ -414,6 +419,8 @@ public void reinitialize() throws IOException { throw new IOException(e); } + LOG.info("{}: SCMStateMachine is reinitializing. newTermIndex = {}", getId(), termIndex); + // re-initialize the DBTransactionBuffer and update the lastAppliedIndex. try { transactionBuffer.init(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java index 5a7e86e99cc..ab753096716 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java @@ -38,6 +38,11 @@ private BigIntegerCodec() { // singleton } + @Override + public Class getTypeClass() { + return BigInteger.class; + } + @Override public byte[] toPersistedFormat(BigInteger object) throws IOException { return object.toByteArray(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java index 4a280d2103a..ea86fa154af 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java @@ -41,75 +41,53 @@ * Class defines the structure and types of the scm.db. 
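The SCMDBDefinition changes in this and the following hunk switch callers from constructing new SCMDBDefinition() to sharing a single SCMDBDefinition.get() instance. Below is a minimal sketch of that static-holder accessor pattern; TableDefinitionSketch and its contents are made up for illustration.

import java.util.Collections;
import java.util.Map;

// Sketch of the static-instance accessor pattern: one shared, immutable definition
// object instead of a new instance per caller.
final class TableDefinitionSketch {
  private static final TableDefinitionSketch INSTANCE =
      new TableDefinitionSketch(Collections.singletonMap("containers", "ContainerInfo"));

  private final Map<String, String> columnFamilies;

  private TableDefinitionSketch(Map<String, String> columnFamilies) {
    this.columnFamilies = columnFamilies;
  }

  // Callers use get() instead of constructing their own copy.
  static TableDefinitionSketch get() {
    return INSTANCE;
  }

  Map<String, String> getColumnFamilies() {
    return columnFamilies;
  }

  public static void main(String[] args) {
    // Both call sites share the same definition object.
    System.out.println(TableDefinitionSketch.get() == TableDefinitionSketch.get()); // true
    System.out.println(TableDefinitionSketch.get().getColumnFamilies());
  }
}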
*/ public class SCMDBDefinition extends DBDefinition.WithMap { - public SCMDBDefinition() { - this(COLUMN_FAMILIES); - } - - protected SCMDBDefinition(Map> map) { - super(map); - } - public static final DBColumnFamilyDefinition DELETED_BLOCKS = new DBColumnFamilyDefinition<>( "deletedBlocks", - Long.class, LongCodec.get(), - DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition VALID_CERTS = new DBColumnFamilyDefinition<>( "validCerts", - BigInteger.class, BigIntegerCodec.get(), - X509Certificate.class, X509CertificateCodec.get()); public static final DBColumnFamilyDefinition VALID_SCM_CERTS = new DBColumnFamilyDefinition<>( "validSCMCerts", - BigInteger.class, BigIntegerCodec.get(), - X509Certificate.class, X509CertificateCodec.get()); public static final DBColumnFamilyDefinition PIPELINES = new DBColumnFamilyDefinition<>( "pipelines", - PipelineID.class, PipelineID.getCodec(), - Pipeline.class, Pipeline.getCodec()); public static final DBColumnFamilyDefinition CONTAINERS = new DBColumnFamilyDefinition<>( "containers", - ContainerID.class, ContainerID.getCodec(), - ContainerInfo.class, ContainerInfo.getCodec()); public static final DBColumnFamilyDefinition TRANSACTIONINFO = new DBColumnFamilyDefinition<>( "scmTransactionInfos", - String.class, StringCodec.get(), - TransactionInfo.class, TransactionInfo.getCodec()); public static final DBColumnFamilyDefinition SEQUENCE_ID = new DBColumnFamilyDefinition<>( "sequenceId", - String.class, StringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition> map) { MOVE = new DBColumnFamilyDefinition<>( "move", - ContainerID.class, ContainerID.getCodec(), - MoveDataNodePair.class, MoveDataNodePair.getCodec()); /** @@ -129,18 +105,14 @@ protected SCMDBDefinition(Map> map) { public static final DBColumnFamilyDefinition META = new DBColumnFamilyDefinition<>( "meta", - String.class, StringCodec.get(), - String.class, StringCodec.get()); public static final DBColumnFamilyDefinition STATEFUL_SERVICE_CONFIG = new DBColumnFamilyDefinition<>( "statefulServiceConfig", - String.class, StringCodec.get(), - ByteString.class, ByteStringCodec.get()); private static final Map> @@ -156,6 +128,16 @@ protected SCMDBDefinition(Map> map) { VALID_CERTS, VALID_SCM_CERTS); + private static final SCMDBDefinition INSTANCE = new SCMDBDefinition(COLUMN_FAMILIES); + + public static SCMDBDefinition get() { + return INSTANCE; + } + + protected SCMDBDefinition(Map> map) { + super(map); + } + @Override public String getName() { return "scm.db"; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java index ec63076b4a6..6aa993f6077 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java @@ -104,7 +104,7 @@ public SCMMetadataStoreImpl(OzoneConfiguration config) public void start(OzoneConfiguration config) throws IOException { if (this.store == null) { - SCMDBDefinition scmdbDefinition = new SCMDBDefinition(); + final SCMDBDefinition scmdbDefinition = SCMDBDefinition.get(); File metaDir = HAUtils.getMetaDir(scmdbDefinition, configuration); // Check if there is a DB Inconsistent Marker in the metaDir. 
This // marker indicates that the DB is in an inconsistent state and hence diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java index 3e7db16c2a0..e0279e8f2f0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java @@ -51,6 +51,11 @@ private X509CertificateCodec() { // singleton } + @Override + public Class getTypeClass() { + return X509Certificate.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java index 23bf41dc83e..41c410a9032 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java @@ -441,20 +441,10 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) continue; } - // If we get here, the container is closed or quasi-closed and all the replicas match that - // state, except for any which are unhealthy. As the container is closed, we can check - // if it is sufficiently replicated using replicationManager, but this only works if the - // legacy RM is not enabled. - boolean legacyEnabled = conf.getBoolean("hdds.scm.replication.enable" + - ".legacy", false); - boolean replicatedOK; - if (legacyEnabled) { - replicatedOK = replicaSet.isSufficientlyReplicatedForOffline(dn.getDatanodeDetails(), nodeManager); - } else { - ReplicationManagerReport report = new ReplicationManagerReport(); - replicationManager.checkContainerStatus(replicaSet.getContainer(), report); - replicatedOK = report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED) == 0; - } + ReplicationManagerReport report = new ReplicationManagerReport(); + replicationManager.checkContainerStatus(replicaSet.getContainer(), report); + boolean replicatedOK = report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED) == 0; + if (replicatedOK) { sufficientlyReplicated++; } else { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index 05ed833edbe..b3350d8a12a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -346,7 +346,7 @@ public void setCommandCounts(CommandQueueReportProto cmds, * Retrieve the number of queued commands of the given type, as reported by * the datanode at the last heartbeat. * @param cmd The command for which to receive the queued command count - * @return -1 if we have no information about the count, or an integer >= 0 + * @return -1 if we have no information about the count, or an integer >= 0 * indicating the command count at the last heartbeat. 
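The DatanodeAdminMonitorImpl hunk above now derives replicatedOK by filling a ReplicationManagerReport and checking that no UNDER_REPLICATED entries were recorded. The sketch below shows that report-then-check shape with simplified, invented types rather than the real SCM classes.

import java.util.EnumMap;
import java.util.Map;

// Rough sketch: a health check fills a report keyed by health state, and the caller
// treats "no UNDER_REPLICATED entries" as sufficiently replicated.
class ReplicationReportSketch {

  enum HealthState { UNDER_REPLICATED, OVER_REPLICATED, MIS_REPLICATED }

  static class Report {
    private final Map<HealthState, Long> stats = new EnumMap<>(HealthState.class);

    void increment(HealthState state) {
      stats.merge(state, 1L, Long::sum);
    }

    long getStat(HealthState state) {
      return stats.getOrDefault(state, 0L);
    }
  }

  // Stand-in for a container status check: records any problems in the report.
  static void checkContainerStatus(int replicaCount, int requiredReplicas, Report report) {
    if (replicaCount < requiredReplicas) {
      report.increment(HealthState.UNDER_REPLICATED);
    } else if (replicaCount > requiredReplicas) {
      report.increment(HealthState.OVER_REPLICATED);
    }
  }

  public static void main(String[] args) {
    Report report = new Report();
    checkContainerStatus(2, 3, report);  // a container missing one replica
    boolean replicatedOK = report.getStat(HealthState.UNDER_REPLICATED) == 0;
    System.out.println(replicatedOK);    // false: not yet safe to take the node offline
  }
}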
*/ public int getCommandCount(SCMCommandProto.Type cmd) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java index 4f7df496906..1cafab3f67c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java @@ -32,6 +32,7 @@ public class DatanodeUsageInfo { private DatanodeDetails datanodeDetails; private SCMNodeStat scmNodeStat; private int containerCount; + private int pipelineCount; /** * Constructs a DatanodeUsageInfo with DatanodeDetails and SCMNodeStat. @@ -45,6 +46,7 @@ public DatanodeUsageInfo( this.datanodeDetails = datanodeDetails; this.scmNodeStat = scmNodeStat; this.containerCount = -1; + this.pipelineCount = -1; } /** @@ -145,6 +147,14 @@ public void setContainerCount(int containerCount) { this.containerCount = containerCount; } + public int getPipelineCount() { + return pipelineCount; + } + + public void setPipelineCount(int pipelineCount) { + this.pipelineCount = pipelineCount; + } + /** * Gets Comparator that compares two DatanodeUsageInfo on the basis of * their utilization values. Utilization is (capacity - remaining) divided @@ -210,6 +220,7 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int clientVersion) { } builder.setContainerCount(containerCount); + builder.setPipelineCount(pipelineCount); return builder; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index d6f0e89c96d..375d68dfe32 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -215,9 +215,9 @@ private boolean allPortsMatch(List dns) { if (dns.size() < 2) { return true; } - int port = dns.get(0).getPort(DatanodeDetails.Port.Name.RATIS).getValue(); + int port = dns.get(0).getRatisPort().getValue(); for (int i = 1; i < dns.size(); i++) { - if (dns.get(i).getPort(DatanodeDetails.Port.Name.RATIS).getValue() + if (dns.get(i).getRatisPort().getValue() != port) { return false; } @@ -398,10 +398,12 @@ private synchronized boolean checkIfDecommissionPossible(List d if (opState != NodeOperationalState.IN_SERVICE) { numDecom--; validDns.remove(dn); + LOG.warn("Cannot decommission {} because it is not IN-SERVICE", dn.getHostName()); } } catch (NodeNotFoundException ex) { numDecom--; validDns.remove(dn); + LOG.warn("Cannot decommission {} because it is not found in SCM", dn.getHostName()); } } @@ -430,9 +432,11 @@ private synchronized boolean checkIfDecommissionPossible(List d } int reqNodes = cif.getReplicationConfig().getRequiredNodes(); if ((inServiceTotal - numDecom) < reqNodes) { + int unHealthyTotal = nodeManager.getAllNodes().size() - inServiceTotal; String errorMsg = "Insufficient nodes. Tried to decommission " + dns.size() + - " nodes of which " + numDecom + " nodes were valid. Cluster has " + inServiceTotal + - " IN-SERVICE nodes, " + reqNodes + " of which are required for minimum replication. "; + " nodes out of " + inServiceTotal + " IN-SERVICE HEALTHY and " + unHealthyTotal + + " not IN-SERVICE or not HEALTHY nodes. 
Cannot decommission as a minimum of " + reqNodes + + " IN-SERVICE HEALTHY nodes are required to maintain replication after decommission. "; LOG.info(errorMsg + "Failing due to datanode : {}, container : {}", dn, cid); errors.add(new DatanodeAdminError("AllHosts", errorMsg)); return false; @@ -552,10 +556,12 @@ private synchronized boolean checkIfMaintenancePossible(List dn if (opState != NodeOperationalState.IN_SERVICE) { numMaintenance--; validDns.remove(dn); + LOG.warn("{} cannot enter maintenance because it is not IN-SERVICE", dn.getHostName()); } } catch (NodeNotFoundException ex) { numMaintenance--; validDns.remove(dn); + LOG.warn("{} cannot enter maintenance because it is not found in SCM", dn.getHostName()); } } @@ -594,9 +600,11 @@ private synchronized boolean checkIfMaintenancePossible(List dn minInService = maintenanceReplicaMinimum; } if ((inServiceTotal - numMaintenance) < minInService) { + int unHealthyTotal = nodeManager.getAllNodes().size() - inServiceTotal; String errorMsg = "Insufficient nodes. Tried to start maintenance for " + dns.size() + - " nodes of which " + numMaintenance + " nodes were valid. Cluster has " + inServiceTotal + - " IN-SERVICE nodes, " + minInService + " of which are required for minimum replication. "; + " nodes out of " + inServiceTotal + " IN-SERVICE HEALTHY and " + unHealthyTotal + + " not IN-SERVICE or not HEALTHY nodes. Cannot enter maintenance mode as a minimum of " + minInService + + " IN-SERVICE HEALTHY nodes are required to maintain replication after maintenance. "; LOG.info(errorMsg + "Failing due to datanode : {}, container : {}", dn, cid); errors.add(new DatanodeAdminError("AllHosts", errorMsg)); return false; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 25be60945a9..992dc82582b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -97,8 +97,6 @@ default RegisteredCommand register( * @param type The type of the SCMCommand. * @param scmCommand A BiConsumer that takes a DatanodeDetails and a * SCMCommand object and performs the necessary actions. - * @return whatever the regular register command returns with default - * layout version passed in. 
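The decommission guard in the NodeDecommissionManager hunk above reduces to simple arithmetic: the nodes left IN-SERVICE after decommissioning must still cover the replication factor, and the error message reports the unhealthy remainder as allNodes minus inServiceTotal. A tiny sketch of that check, with made-up method and variable names:

// Sketch of the capacity guard: after removing the candidate nodes, enough
// IN-SERVICE nodes must remain to hold all required replicas.
class DecommissionGuardSketch {

  static boolean canDecommission(int inServiceTotal, int allNodes, int toDecommission, int requiredNodes) {
    int remaining = inServiceTotal - toDecommission;
    if (remaining < requiredNodes) {
      int unhealthyTotal = allNodes - inServiceTotal;
      System.out.println("Insufficient nodes: " + inServiceTotal + " IN-SERVICE, "
          + unhealthyTotal + " not IN-SERVICE or not HEALTHY; need at least "
          + requiredNodes + " after decommissioning " + toDecommission + ".");
      return false;
    }
    return true;
  }

  public static void main(String[] args) {
    // 4 IN-SERVICE nodes out of 5, and a replication config requiring 3 nodes:
    System.out.println(canDecommission(4, 5, 1, 3)); // true  (3 nodes remain)
    System.out.println(canDecommission(4, 5, 2, 3)); // false (only 2 would remain)
  }
}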
*/ default void registerSendCommandNotify(SCMCommandProto.Type type, BiConsumer> scmCommand) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java index a66fc0d22fb..1bd9677a363 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java @@ -45,7 +45,7 @@ public interface NodeManagerMXBean { /** * @return Get the NodeStatus table information like hostname, - * Commissioned State & Operational State column for dataNode + * Commissioned State and Operational State column for dataNode */ Map> getNodeStatusInfo(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index 3c3ff8fb833..78c1801a103 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -791,7 +791,7 @@ public void run() { * * This method is synchronized to coordinate node state updates between * the upgrade finalization thread which calls this method, and the - * node health processing thread that calls {@link this#checkNodesHealth}. + * node health processing thread that calls {@link #checkNodesHealth}. */ public synchronized void forceNodesToHealthyReadOnly() { try { @@ -817,7 +817,7 @@ public synchronized void forceNodesToHealthyReadOnly() { /** * This method is synchronized to coordinate node state updates between * the upgrade finalization thread which calls - * {@link this#forceNodesToHealthyReadOnly}, and the node health processing + * {@link #forceNodesToHealthyReadOnly}, and the node health processing * thread that calls this method. */ @VisibleForTesting diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 05a68628852..7db0c88e173 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -146,6 +146,8 @@ public class SCMNodeManager implements NodeManager { private static final String LASTHEARTBEAT = "LASTHEARTBEAT"; private static final String USEDSPACEPERCENT = "USEDSPACEPERCENT"; private static final String TOTALCAPACITY = "CAPACITY"; + private static final String DNUUID = "UUID"; + private static final String VERSION = "VERSION"; /** * Constructs SCM machine Manager. 
*/ @@ -447,6 +449,11 @@ public RegisteredCommand register( processNodeReport(datanodeDetails, nodeReport); LOG.info("Updated datanode to: {}", dn); scmNodeEventPublisher.fireEvent(SCMEvents.NODE_ADDRESS_UPDATE, dn); + } else if (isVersionChange(oldNode.getVersion(), datanodeDetails.getVersion())) { + LOG.info("Update the version for registered datanode = {}, " + + "oldVersion = {}, newVersion = {}.", + datanodeDetails.getUuid(), oldNode.getVersion(), datanodeDetails.getVersion()); + nodeStateManager.updateNode(datanodeDetails, layoutInfo); + } } catch (NodeNotFoundException e) { LOG.error("Cannot find datanode {} from nodeStateManager", @@ -508,6 +515,18 @@ private boolean updateDnsToUuidMap( return ipChanged || hostNameChanged; } + /** + * Check if the version has been updated. + * + * @param oldVersion datanode oldVersion + * @param newVersion datanode newVersion + * @return true if the datanode version has changed, false otherwise. + */ + private boolean isVersionChange(String oldVersion, String newVersion) { + final boolean versionChanged = !Objects.equals(oldVersion, newVersion); + return versionChanged; + } + /** * Send heartbeat to indicate the datanode is alive and doing well. * @@ -982,6 +1001,7 @@ public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn) { DatanodeUsageInfo usageInfo = new DatanodeUsageInfo(dn, stat); try { usageInfo.setContainerCount(getContainerCount(dn)); + usageInfo.setPipelineCount(getPipeLineCount(dn)); } catch (NodeNotFoundException ex) { LOG.error("Unknown datanode {}.", dn, ex); } @@ -1135,6 +1155,8 @@ public Map> getNodeStatusInfo() { String nonScmUsedPerc = storagePercentage[1]; map.put(USEDSPACEPERCENT, "Ozone: " + scmUsedPerc + "%, other: " + nonScmUsedPerc + "%"); + map.put(DNUUID, dni.getUuidString()); + map.put(VERSION, dni.getVersion()); nodes.put(hostName, map); } return nodes; @@ -1144,7 +1166,6 @@ public Map> getNodeStatusInfo() { * Calculate the storage capacity of the DataNode node. * @param storageReports Calculate the storage capacity corresponding * to the storage collection. - * @return */ public static String calculateStorageCapacity( List storageReports) { @@ -1192,7 +1213,6 @@ private static String convertUnit(double value) { * Calculate the storage usage percentage of a DataNode node. * @param storageReports Calculate the storage percentage corresponding * to the storage collection.
- * @return */ public static String[] calculateStoragePercentage( List storageReports) { @@ -1610,6 +1630,11 @@ public int getContainerCount(DatanodeDetails datanodeDetails) return nodeStateManager.getContainerCount(datanodeDetails.getUuid()); } + public int getPipeLineCount(DatanodeDetails datanodeDetails) + throws NodeNotFoundException { + return nodeStateManager.getPipelinesCount(datanodeDetails); + } + @Override public void addDatanodeCommand(UUID dnId, SCMCommand command) { writeLock().lock(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java index dcc7bdb3fba..b931f122a97 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.safemode.SafeModeManager; import org.apache.hadoop.hdds.scm.server @@ -91,35 +92,45 @@ public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode, for (PipelineReport report : pipelineReport.getPipelineReportList()) { try { processPipelineReport(report, dn, publisher); - } catch (NotLeaderException ex) { - // Avoid NotLeaderException logging which happens when processing - // pipeline report on followers. } catch (PipelineNotFoundException e) { - LOGGER.error("Could not find pipeline {}", report.getPipelineID()); + handlePipelineNotFoundException(report, dn, publisher); } catch (IOException | TimeoutException e) { - LOGGER.error("Could not process pipeline report={} from dn={}.", - report, dn, e); + // Ignore NotLeaderException logging which happens when processing + // pipeline report on followers. + if (!isNotLeaderException(e)) { + LOGGER.error("Could not process pipeline report={} from dn={}.", + report, dn, e); + } } } } - protected void processPipelineReport(PipelineReport report, - DatanodeDetails dn, EventPublisher publisher) - throws IOException, TimeoutException { - PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID()); - Pipeline pipeline; - try { - pipeline = pipelineManager.getPipeline(pipelineID); - } catch (PipelineNotFoundException e) { - if (scmContext.isLeader()) { - LOGGER.info("Reported pipeline {} is not found", pipelineID); - SCMCommand< ? > command = new ClosePipelineCommand(pipelineID); + private void handlePipelineNotFoundException(final PipelineReport report, + final DatanodeDetails dn, final EventPublisher publisher) { + final PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID()); + LOGGER.info("Pipeline {}, reported by datanode {} is not found.", pipelineID, dn); + if (scmContext.isLeader()) { + try { + final SCMCommand command = new ClosePipelineCommand(pipelineID); command.setTerm(scmContext.getTermOfLeader()); publisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(dn.getUuid(), command)); + } catch (NotLeaderException ex) { + // Do nothing if the leader has changed. 
} - return; } + } + + private static boolean isNotLeaderException(final Exception e) { + return e instanceof SCMException && ((SCMException) e).getResult().equals( + SCMException.ResultCodes.SCM_NOT_LEADER); + } + + protected void processPipelineReport(PipelineReport report, + DatanodeDetails dn, EventPublisher publisher) + throws IOException, TimeoutException { + final PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID()); + final Pipeline pipeline = pipelineManager.getPipeline(pipelineID); setReportedDatanode(pipeline, dn); setPipelineLeaderId(report, pipeline, dn); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java index 4dd0443a505..d6058877126 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java @@ -46,7 +46,6 @@ void addPipeline(HddsProtos.Pipeline pipelineProto) /** * Removing pipeline would be replicated to Ratis. * @param pipelineIDProto - * @return Pipeline removed * @throws IOException */ @Replicate diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index e77e2aebb31..c1431845ce1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse; @@ -216,7 +217,7 @@ public AllocateScmBlockResponseProto allocateScmBlock( for (AllocatedBlock block : allocatedBlocks) { builder.addBlocks(AllocateBlockResponse.newBuilder() .setContainerBlockID(block.getBlockID().getProtobuf()) - .setPipeline(block.getPipeline().getProtobufMessage(clientVersion))); + .setPipeline(block.getPipeline().getProtobufMessage(clientVersion, Name.IO_PORTS))); } return builder.build(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 62b27f503e2..80ea82ed521 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -117,6 +117,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ResetDeletedBlockRetryCountResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import 
org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -865,21 +866,21 @@ public SCMListContainerResponseProto listContainer( } else if (request.hasFactor()) { factor = request.getFactor(); } - List containerList; + ContainerListResult containerListAndTotalCount; if (factor != null) { // Call from a legacy client - containerList = + containerListAndTotalCount = impl.listContainer(startContainerID, count, state, factor); } else { - containerList = - impl.listContainer(startContainerID, count, state, replicationType, - repConfig); + containerListAndTotalCount = + impl.listContainer(startContainerID, count, state, replicationType, repConfig); } SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); - for (ContainerInfo container : containerList) { + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { builder.addContainers(container.getProtobuf()); } + builder.setContainerCount(containerListAndTotalCount.getTotalCount()); return builder.build(); } @@ -1009,6 +1010,7 @@ public HddsProtos.GetScmInfoResponseProto getScmInfo( .setClusterId(scmInfo.getClusterId()) .setScmId(scmInfo.getScmId()) .addAllPeerRoles(scmInfo.getRatisPeerRoles()) + .setScmRatisEnabled(scmInfo.getScmRatisEnabled()) .build(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index ae645858a33..bdd7160de4c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -19,16 +19,25 @@ import java.util.List; import java.util.Map; -import java.util.Optional; +import java.util.UUID; +import java.util.Set; +import java.util.HashSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; +import com.google.common.collect.Sets; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport; import org.apache.hadoop.hdds.server.events.EventQueue; @@ -50,12 +59,21 @@ public class ContainerSafeModeRule extends // Required cutoff % for containers with at least 1 reported replica. private double safeModeCutoff; // Containers read from scm db (excluding containers in ALLOCATED state). 
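The listContainer changes above (and the matching server-side changes further down) switch from returning a bare list to a ContainerListResult that carries both the count-capped page of containers and the total number of matching containers, which the protobuf response exposes via setContainerCount. The following is a minimal standalone sketch of that pagination pattern; the class and method names here are hypothetical stand-ins, and containers are reduced to numeric IDs.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Hypothetical stand-in for ContainerListResult: a page of results capped at
// "count", plus the total number of items that matched before the cap.
final class PagedResultSketch<T> {
  private final List<T> items;
  private final long totalCount;

  PagedResultSketch(List<T> items, long totalCount) {
    this.items = items;
    this.totalCount = totalCount;
  }

  List<T> getItems() {
    return items;
  }

  long getTotalCount() {
    return totalCount;
  }

  // Mirrors the server-side flow: filter, sort, remember the total, then limit.
  static PagedResultSketch<Long> list(List<Long> containerIds, long startId, int count) {
    List<Long> matching = containerIds.stream()
        .filter(id -> id >= startId)
        .sorted()
        .collect(Collectors.toList());
    List<Long> page = matching.stream().limit(count).collect(Collectors.toList());
    return new PagedResultSketch<>(page, matching.size());
  }

  public static void main(String[] args) {
    PagedResultSketch<Long> result = list(Arrays.asList(5L, 1L, 9L, 3L, 7L), 3L, 2);
    System.out.println(result.getItems());      // [3, 5]
    System.out.println(result.getTotalCount()); // 4
  }
}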
- private Map containerMap; - private double maxContainer; - - private AtomicLong containerWithMinReplicas = new AtomicLong(0); + private Set ratisContainers; + private Set ecContainers; + private Map> ecContainerDNsMap; + private double ratisMaxContainer; + private double ecMaxContainer; + private AtomicLong ratisContainerWithMinReplicas = new AtomicLong(0); + private AtomicLong ecContainerWithMinReplicas = new AtomicLong(0); private final ContainerManager containerManager; + public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, + ConfigurationSource conf, + ContainerManager containerManager, SCMSafeModeManager manager) { + this(ruleName, eventQueue, conf, containerManager.getContainers(), containerManager, manager); + } + public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, ConfigurationSource conf, List containers, @@ -71,127 +89,268 @@ public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT + " value should be >= 0.0 and <= 1.0"); - containerMap = new ConcurrentHashMap<>(); - containers.forEach(container -> { - // There can be containers in OPEN/CLOSING state which were never - // created by the client. We are not considering these containers for - // now. These containers can be handled by tracking pipelines. - - Optional.ofNullable(container.getState()) - .filter(state -> (state == HddsProtos.LifeCycleState.QUASI_CLOSED || - state == HddsProtos.LifeCycleState.CLOSED) - && container.getNumberOfKeys() > 0) - .ifPresent(s -> containerMap.put(container.getContainerID(), - container)); - }); - maxContainer = containerMap.size(); - long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff); - getSafeModeMetrics().setNumContainerWithOneReplicaReportedThreshold(cutOff); + ratisContainers = new HashSet<>(); + ecContainers = new HashSet<>(); + ecContainerDNsMap = new ConcurrentHashMap<>(); - LOG.info("containers with one replica threshold count {}", cutOff); + initializeRule(containers); } @Override protected TypedEvent getEventType() { - return SCMEvents.NODE_REGISTRATION_CONT_REPORT; + return SCMEvents.CONTAINER_REGISTRATION_REPORT; } - @Override protected synchronized boolean validate() { - return getCurrentContainerThreshold() >= safeModeCutoff; + return (getCurrentContainerThreshold() >= safeModeCutoff) && + (getCurrentECContainerThreshold() >= safeModeCutoff); } @VisibleForTesting public synchronized double getCurrentContainerThreshold() { - if (maxContainer == 0) { + if (ratisMaxContainer == 0) { + return 1; + } + return (ratisContainerWithMinReplicas.doubleValue() / ratisMaxContainer); + } + + @VisibleForTesting + public synchronized double getCurrentECContainerThreshold() { + if (ecMaxContainer == 0) { return 1; } - return (containerWithMinReplicas.doubleValue() / maxContainer); + return (ecContainerWithMinReplicas.doubleValue() / ecMaxContainer); + } + + private synchronized double getEcMaxContainer() { + if (ecMaxContainer == 0) { + return 1; + } + return ecMaxContainer; + } + + private synchronized double getRatisMaxContainer() { + if (ratisMaxContainer == 0) { + return 1; + } + return ratisMaxContainer; } @Override protected synchronized void process( NodeRegistrationContainerReport reportsProto) { + DatanodeDetails datanodeDetails = reportsProto.getDatanodeDetails(); + UUID datanodeUUID = datanodeDetails.getUuid(); + StorageContainerDatanodeProtocolProtos.ContainerReportsProto report = reportsProto.getReport(); - reportsProto.getReport().getReportsList().forEach(c -> { - if 
(containerMap.containsKey(c.getContainerID())) { - if (containerMap.remove(c.getContainerID()) != null) { - containerWithMinReplicas.getAndAdd(1); - getSafeModeMetrics() - .incCurrentContainersWithOneReplicaReportedCount(); - } + report.getReportsList().forEach(c -> { + long containerID = c.getContainerID(); + + + // If it is a Ratis container. + if (ratisContainers.contains(containerID)) { + recordReportedContainer(containerID, Boolean.FALSE); + ratisContainers.remove(containerID); + } + + // If it is an EC container. + if (ecContainers.contains(containerID)) { + putInContainerDNsMap(containerID, ecContainerDNsMap, datanodeUUID); + recordReportedContainer(containerID, Boolean.TRUE); + } }); if (scmInSafeMode()) { SCMSafeModeManager.getLogger().info( - "SCM in safe mode. {} % containers have at least one" - + " reported replica.", - (containerWithMinReplicas.doubleValue() / maxContainer) * 100); + "SCM in safe mode. {} % containers [Ratis] have at least one" + + " reported replica, {} % containers [EC] have at least N reported replicas.", + ((ratisContainerWithMinReplicas.doubleValue() / getRatisMaxContainer()) * 100), + ((ecContainerWithMinReplicas.doubleValue() / getEcMaxContainer()) * 100) + ); + } + } + + /** + * Record a reported container. + * + * The container is counted separately depending on whether it is a Ratis or an EC container. + * + * @param containerID containerID + * @param isEcContainer true if the container is an EC container, false otherwise. + */ + private void recordReportedContainer(long containerID, boolean isEcContainer) { + + int uuids = 1; + if (isEcContainer && ecContainerDNsMap.containsKey(containerID)) { + uuids = ecContainerDNsMap.get(containerID).size(); + } + + int minReplica = getMinReplica(containerID); + if (uuids >= minReplica) { + if (isEcContainer) { + getSafeModeMetrics() + .incCurrentContainersWithECDataReplicaReportedCount(); + ecContainerWithMinReplicas.getAndAdd(1); + } else { + ratisContainerWithMinReplicas.getAndAdd(1); + getSafeModeMetrics() + .incCurrentContainersWithOneReplicaReportedCount(); + } + } + } + + /** + * Get the minimum replica count. + * + * If it is a Ratis container, the minimum replica count is 1. + * If it is an EC container, the minimum replica count is the number of data blocks in its replicationConfig. + * + * @param pContainerID containerID + * @return the minimum replica count.
+ */ + private int getMinReplica(long pContainerID) { + + try { + ContainerID containerID = ContainerID.valueOf(pContainerID); + ContainerInfo container = containerManager.getContainer(containerID); + ReplicationConfig replicationConfig = container.getReplicationConfig(); + return replicationConfig.getMinimumNodes(); + } catch (ContainerNotFoundException e) { + LOG.error("containerId = {} not found.", pContainerID, e); + } catch (Exception e) { + LOG.error("containerId = {} not found.", pContainerID, e); } + + return 1; + } + + private void putInContainerDNsMap(long containerID, Map> containerDNsMap, + UUID datanodeUUID) { + containerDNsMap.computeIfAbsent(containerID, key -> Sets.newHashSet()); + containerDNsMap.get(containerID).add(datanodeUUID); } @Override protected synchronized void cleanup() { - containerMap.clear(); + ratisContainers.clear(); + ecContainers.clear(); + ecContainerDNsMap.clear(); } @Override public String getStatusText() { - List sampleContainers = containerMap.keySet() - .stream() - .limit(SAMPLE_CONTAINER_DISPLAY_LIMIT) - .collect(Collectors.toList()); - String status = String.format("%% of containers with at least one reported" - + " replica (=%1.2f) >= safeModeCutoff (=%1.2f)", + // ratis container + String status = String.format( + "%1.2f%% of [Ratis] Containers(%s / %s) with at least one reported replica (=%1.2f) >= " + + "safeModeCutoff (=%1.2f);", + (ratisContainerWithMinReplicas.doubleValue() / getRatisMaxContainer()) * 100, + ratisContainerWithMinReplicas, (long) getRatisMaxContainer(), getCurrentContainerThreshold(), this.safeModeCutoff); - if (!sampleContainers.isEmpty()) { + Set sampleRatisContainers = ratisContainers.stream(). + limit(SAMPLE_CONTAINER_DISPLAY_LIMIT). + collect(Collectors.toSet()); + + if (!sampleRatisContainers.isEmpty()) { String sampleContainerText = - "Sample containers not satisfying the criteria : " + sampleContainers; + "Sample Ratis Containers not satisfying the criteria : " + sampleRatisContainers + ";"; status = status.concat("\n").concat(sampleContainerText); } + // ec container + String ecStatus = String.format( + "%1.2f%% of [EC] Containers(%s / %s) with at least N reported replica (=%1.2f) >= " + + "safeModeCutoff (=%1.2f);", + (ecContainerWithMinReplicas.doubleValue() / getEcMaxContainer()) * 100, + ecContainerWithMinReplicas, (long) getEcMaxContainer(), + getCurrentECContainerThreshold(), this.safeModeCutoff); + status = status.concat("\n").concat(ecStatus); + + Set sampleEcContainers = ecContainerDNsMap.entrySet().stream(). + filter(entry -> { + Long containerId = entry.getKey(); + int minReplica = getMinReplica(containerId); + Set allReplicas = entry.getValue(); + if (allReplicas.size() >= minReplica) { + return false; + } + return true; + }). + map(Map.Entry::getKey). + limit(SAMPLE_CONTAINER_DISPLAY_LIMIT). 
+ collect(Collectors.toSet()); + + if (!sampleEcContainers.isEmpty()) { + String sampleECContainerText = + "Sample EC Containers not satisfying the criteria : " + sampleEcContainers + ";"; + status = status.concat("\n").concat(sampleECContainerText); + } + return status; } @Override public synchronized void refresh(boolean forceRefresh) { + List containers = containerManager.getContainers(); if (forceRefresh) { - reInitializeRule(); + initializeRule(containers); } else { if (!validate()) { - reInitializeRule(); + initializeRule(containers); } } } - private void reInitializeRule() { - containerMap.clear(); - containerManager.getContainers().forEach(container -> { + private boolean checkContainerState(LifeCycleState state) { + if (state == LifeCycleState.QUASI_CLOSED || state == LifeCycleState.CLOSED) { + return true; + } + return false; + } + + private void initializeRule(List containers) { + + // Clean up the related data in the map. + ratisContainers.clear(); + ecContainers.clear(); + + // Iterate through the container list to + // get the minimum replica count for each container. + containers.forEach(container -> { // There can be containers in OPEN/CLOSING state which were never // created by the client. We are not considering these containers for // now. These containers can be handled by tracking pipelines. - Optional.ofNullable(container.getState()) - .filter(state -> (state == HddsProtos.LifeCycleState.QUASI_CLOSED || - state == HddsProtos.LifeCycleState.CLOSED) - && container.getNumberOfKeys() > 0) - .ifPresent(s -> containerMap.put(container.getContainerID(), - container)); + LifeCycleState containerState = container.getState(); + HddsProtos.ReplicationType replicationType = container.getReplicationType(); + + if (checkContainerState(containerState) && container.getNumberOfKeys() > 0) { + // If it's of type Ratis + if (replicationType.equals(HddsProtos.ReplicationType.RATIS)) { + ratisContainers.add(container.getContainerID()); + } + + // If it's of type EC + if (replicationType.equals(HddsProtos.ReplicationType.EC)) { + ecContainers.add(container.getContainerID()); + } + } }); - maxContainer = containerMap.size(); - long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff); + ratisMaxContainer = ratisContainers.size(); + ecMaxContainer = ecContainers.size(); - LOG.info("Refreshed one replica container threshold {}, " + - "currentThreshold {}", cutOff, containerWithMinReplicas.get()); - getSafeModeMetrics() - .setNumContainerWithOneReplicaReportedThreshold(cutOff); - } + long ratisCutOff = (long) Math.ceil(ratisMaxContainer * safeModeCutoff); + long ecCutOff = (long) Math.ceil(ecMaxContainer * safeModeCutoff); + + getSafeModeMetrics().setNumContainerWithOneReplicaReportedThreshold(ratisCutOff); + getSafeModeMetrics().setNumContainerWithECDataReplicaReportedThreshold(ecCutOff); + LOG.info("Refreshed Containers with one replica threshold count {}, " + + "with ec n replica threshold count {}.", ratisCutOff, ecCutOff); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java index a5ecdb23425..78ce994af73 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; 
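The reworked ContainerSafeModeRule above keeps two independent tallies: a Ratis container needs at least one reported replica, an EC container needs reports from at least as many distinct datanodes as its minimum (data) replica count, and safe mode can only be exited once both fractions reach the configured cutoff (hdds.scm.safemode.threshold.pct). The following is a minimal standalone sketch of that double-threshold check; the names are illustrative, not Ozone code.

// Illustrative sketch only: the rule exits safe mode when both the Ratis and
// the EC fraction of sufficiently reported containers reach the same cutoff.
public final class DualThresholdSketch {
  private final double cutoff;      // e.g. hdds.scm.safemode.threshold.pct
  private final long ratisTotal;
  private final long ecTotal;
  private long ratisReported;
  private long ecReported;

  DualThresholdSketch(double cutoff, long ratisTotal, long ecTotal) {
    this.cutoff = cutoff;
    this.ratisTotal = ratisTotal;
    this.ecTotal = ecTotal;
  }

  void reportRatis() { ratisReported++; }
  void reportEc()    { ecReported++; }

  // Empty categories count as satisfied, matching the "return 1 when max == 0" guards.
  private static double fraction(long reported, long total) {
    return total == 0 ? 1.0 : (double) reported / total;
  }

  boolean canExitSafeMode() {
    return fraction(ratisReported, ratisTotal) >= cutoff
        && fraction(ecReported, ecTotal) >= cutoff;
  }

  public static void main(String[] args) {
    DualThresholdSketch rule = new DualThresholdSketch(0.99, 100, 10);
    for (int i = 0; i < 99; i++) {
      rule.reportRatis();
    }
    System.out.println(rule.canExitSafeMode()); // false: EC side still at 0%
    for (int i = 0; i < 10; i++) {
      rule.reportEc();
    }
    System.out.println(rule.canExitSafeMode()); // true: both sides >= 99%
  }
}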
+import java.util.stream.Collectors; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -90,7 +91,7 @@ public class SCMSafeModeManager implements SafeModeManager { private AtomicBoolean preCheckComplete = new AtomicBoolean(false); private AtomicBoolean forceExitSafeMode = new AtomicBoolean(false); - private Map exitRules = new HashMap(1); + private Map exitRules = new HashMap<>(1); private Set preCheckRules = new HashSet<>(1); private ConfigurationSource config; private static final String CONT_EXIT_RULE = "ContainerSafeModeRule"; @@ -110,6 +111,8 @@ public class SCMSafeModeManager implements SafeModeManager { private final SafeModeMetrics safeModeMetrics; + + // TODO: Remove allContainers argument. (HDDS-11795) public SCMSafeModeManager(ConfigurationSource conf, List allContainers, ContainerManager containerManager, PipelineManager pipelineManager, @@ -126,30 +129,17 @@ public SCMSafeModeManager(ConfigurationSource conf, if (isSafeModeEnabled) { this.safeModeMetrics = SafeModeMetrics.create(); - ContainerSafeModeRule containerSafeModeRule = - new ContainerSafeModeRule(CONT_EXIT_RULE, eventQueue, config, - allContainers, containerManager, this); - DataNodeSafeModeRule dataNodeSafeModeRule = - new DataNodeSafeModeRule(DN_EXIT_RULE, eventQueue, config, this); - exitRules.put(CONT_EXIT_RULE, containerSafeModeRule); - exitRules.put(DN_EXIT_RULE, dataNodeSafeModeRule); - preCheckRules.add(DN_EXIT_RULE); - if (conf.getBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT) - && pipelineManager != null) { - HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = - new HealthyPipelineSafeModeRule(HEALTHY_PIPELINE_EXIT_RULE, - eventQueue, pipelineManager, - this, config, scmContext); - OneReplicaPipelineSafeModeRule oneReplicaPipelineSafeModeRule = - new OneReplicaPipelineSafeModeRule( - ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, eventQueue, - pipelineManager, this, conf); - exitRules.put(HEALTHY_PIPELINE_EXIT_RULE, healthyPipelineSafeModeRule); - exitRules.put(ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, - oneReplicaPipelineSafeModeRule); - } + + // TODO: Remove the cyclic ("this") dependency (HDDS-11797) + SafeModeRuleFactory.initialize(config, scmContext, eventQueue, + this, pipelineManager, containerManager); + SafeModeRuleFactory factory = SafeModeRuleFactory.getInstance(); + + exitRules = factory.getSafeModeRules().stream().collect( + Collectors.toMap(SafeModeExitRule::getRuleName, rule -> rule)); + + preCheckRules = factory.getPreCheckRules().stream() + .map(SafeModeExitRule::getRuleName).collect(Collectors.toSet()); } else { this.safeModeMetrics = null; exitSafeMode(eventQueue, true); @@ -341,6 +331,17 @@ public double getCurrentContainerThreshold() { .getCurrentContainerThreshold(); } + @VisibleForTesting + public double getCurrentECContainerThreshold() { + return ((ContainerSafeModeRule) exitRules.get(CONT_EXIT_RULE)) + .getCurrentECContainerThreshold(); + } + + @VisibleForTesting + public ContainerSafeModeRule getContainerSafeModeRule() { + return (ContainerSafeModeRule) exitRules.get(CONT_EXIT_RULE); + } + @VisibleForTesting public HealthyPipelineSafeModeRule getHealthyPipelineSafeModeRule() { return (HealthyPipelineSafeModeRule) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java index 
02bc10ba6e4..44c77ac3de8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java @@ -36,8 +36,12 @@ public class SafeModeMetrics { // These all values will be set to some values when safemode is enabled. private @Metric MutableGaugeLong numContainerWithOneReplicaReportedThreshold; + private @Metric MutableGaugeLong + numContainerWithECDataReplicaReportedThreshold; private @Metric MutableCounterLong currentContainersWithOneReplicaReportedCount; + private @Metric MutableCounterLong + currentContainersWithECDataReplicaReportedCount; // When hdds.scm.safemode.pipeline-availability.check is set then only // below metrics will have some values, otherwise they will be zero. @@ -75,10 +79,18 @@ public void setNumContainerWithOneReplicaReportedThreshold(long val) { this.numContainerWithOneReplicaReportedThreshold.set(val); } + public void setNumContainerWithECDataReplicaReportedThreshold(long val) { + this.numContainerWithECDataReplicaReportedThreshold.set(val); + } + public void incCurrentContainersWithOneReplicaReportedCount() { this.currentContainersWithOneReplicaReportedCount.incr(); } + public void incCurrentContainersWithECDataReplicaReportedCount() { + this.currentContainersWithECDataReplicaReportedCount.incr(); + } + MutableGaugeLong getNumHealthyPipelinesThreshold() { return numHealthyPipelinesThreshold; } @@ -100,6 +112,10 @@ MutableGaugeLong getNumContainerWithOneReplicaReportedThreshold() { return numContainerWithOneReplicaReportedThreshold; } + MutableGaugeLong getNumContainerWithECDataReplicaReportedThreshold() { + return numContainerWithECDataReplicaReportedThreshold; + } + MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { return currentContainersWithOneReplicaReportedCount; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java new file mode 100644 index 00000000000..8e75f51b962 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.scm.safemode; + + +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; + +/** + * Factory to create SafeMode rules. + */ +public final class SafeModeRuleFactory { + + + private static final Logger LOG = LoggerFactory.getLogger(SafeModeRuleFactory.class); + + // TODO: Move the rule names to respective rules. (HDDS-11798) + private static final String CONT_EXIT_RULE = "ContainerSafeModeRule"; + private static final String DN_EXIT_RULE = "DataNodeSafeModeRule"; + private static final String HEALTHY_PIPELINE_EXIT_RULE = + "HealthyPipelineSafeModeRule"; + private static final String ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE = + "AtleastOneDatanodeReportedRule"; + + private final ConfigurationSource config; + private final SCMContext scmContext; + private final EventQueue eventQueue; + + // TODO: Remove dependency on safeModeManager (HDDS-11797) + private final SCMSafeModeManager safeModeManager; + private final PipelineManager pipelineManager; + private final ContainerManager containerManager; + + private final List> safeModeRules; + private final List> preCheckRules; + + private static SafeModeRuleFactory instance; + + private SafeModeRuleFactory(final ConfigurationSource config, + final SCMContext scmContext, + final EventQueue eventQueue, + final SCMSafeModeManager safeModeManager, + final PipelineManager pipelineManager, + final ContainerManager containerManager) { + this.config = config; + this.scmContext = scmContext; + this.eventQueue = eventQueue; + this.safeModeManager = safeModeManager; + this.pipelineManager = pipelineManager; + this.containerManager = containerManager; + this.safeModeRules = new ArrayList<>(); + this.preCheckRules = new ArrayList<>(); + loadRules(); + } + + private void loadRules() { + // TODO: Use annotation to load the rules. (HDDS-11730) + safeModeRules.add(new ContainerSafeModeRule(CONT_EXIT_RULE, eventQueue, config, + containerManager, safeModeManager)); + SafeModeExitRule dnRule = new DataNodeSafeModeRule(DN_EXIT_RULE, eventQueue, config, safeModeManager); + safeModeRules.add(dnRule); + preCheckRules.add(dnRule); + + // TODO: Move isRuleEnabled check to the Rule implementation. (HDDS-11799) + if (config.getBoolean( + HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, + HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT) + && pipelineManager != null) { + + safeModeRules.add(new HealthyPipelineSafeModeRule(HEALTHY_PIPELINE_EXIT_RULE, + eventQueue, pipelineManager, safeModeManager, config, scmContext)); + safeModeRules.add(new OneReplicaPipelineSafeModeRule( + ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, eventQueue, + pipelineManager, safeModeManager, config)); + } + + } + + public static synchronized SafeModeRuleFactory getInstance() { + if (instance != null) { + return instance; + } + throw new IllegalStateException("SafeModeRuleFactory not initialized," + + " call initialize method before getInstance."); + } + + // TODO: Refactor and reduce the arguments. 
(HDDS-11800) + public static synchronized void initialize( + final ConfigurationSource config, + final SCMContext scmContext, + final EventQueue eventQueue, + final SCMSafeModeManager safeModeManager, + final PipelineManager pipelineManager, + final ContainerManager containerManager) { + instance = new SafeModeRuleFactory(config, scmContext, eventQueue, + safeModeManager, pipelineManager, containerManager); + } + + public List> getSafeModeRules() { + return safeModeRules; + } + + public List> getPreCheckRules() { + return preCheckRules; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java index d38a904d09c..fc7249462c4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java @@ -118,6 +118,8 @@ public class RootCARotationManager extends StatefulService { * * @param scm the storage container manager * + *

+   * <pre>
+   * {@code
        *                         (1)   (3)(4)
        *                   --------------------------->
        *                         (2)                        scm2(Follower)
    @@ -130,8 +132,8 @@ public class RootCARotationManager extends StatefulService {
        *                   --------------------------->
        *                          (2)                       scm3(Follower)
        *                   <---------------------------
    -   *
    -   *
    +   * }
+   * </pre>
    * (1) Rotation Prepare * (2) Rotation Prepare Ack * (3) Rotation Commit @@ -186,7 +188,7 @@ public void notifyStatusChanged() { waitAckTask.cancel(true); } if (waitAckTimeoutTask != null) { - waitAckTask.cancel(true); + waitAckTimeoutTask.cancel(true); } if (clearPostProcessingTask != null) { clearPostProcessingTask.cancel(true); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java index 6b77350cc8c..50c7401dbb0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java @@ -158,7 +158,6 @@ public void stop() { } public static boolean isSecretKeyEnable(SecurityConfig conf) { - return conf.isSecurityEnabled() && - (conf.isBlockTokenEnabled() || conf.isContainerTokenEnabled()); + return conf.isSecurityEnabled(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 3bf8d9c55ca..e8796716fd9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -37,10 +37,8 @@ import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto.Builder; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeTransferInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB; @@ -48,6 +46,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.FetchMetrics; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; @@ -109,6 +108,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -421,11 +421,12 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. 
* @throws IOException */ @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @@ -437,11 +438,12 @@ public List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @@ -453,53 +455,36 @@ public List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param factor Container factor. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override @Deprecated - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { + return listContainerInternal(startContainerID, count, state, factor, null, null); + } + + private ContainerListResult listContainerInternal(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("startContainerID", String.valueOf(startContainerID)); - auditMap.put("count", String.valueOf(count)); - if (state != null) { - auditMap.put("state", state.name()); - } - if (factor != null) { - auditMap.put("factor", factor.name()); - } + Map auditMap = buildAuditMap(startContainerID, count, state, factor, replicationType, repConfig); + try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state != null) { - if (factor != null) { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> (info.getReplicationFactor() == factor)) - .sorted().limit(count).collect(Collectors.toList()); - } else { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - .sorted().limit(count).collect(Collectors.toList()); - } - } else { - if (factor != null) { - return scm.getContainerManager().getContainers().stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. 
- .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> info.getReplicationFactor() == factor) - .sorted().limit(count).collect(Collectors.toList()); - } else { - return scm.getContainerManager().getContainers(containerId, count); - } - } + Stream containerStream = + buildContainerStream(factor, replicationType, repConfig, getBaseContainerStream(state)); + List containerInfos = + containerStream.filter(info -> info.containerID().getId() >= startContainerID) + .sorted().collect(Collectors.toList()); + List limitedContainers = + containerInfos.stream().limit(count).collect(Collectors.toList()); + long totalCount = (long) containerInfos.size(); + return new ContainerListResult(limitedContainers, totalCount); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( @@ -513,74 +498,74 @@ public List listContainer(long startContainerID, } } - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * @param state Container with this state will be returned. - * @param repConfig Replication Config for the container. - * @return a list of pipeline. - * @throws IOException - */ - @Override - public List listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state, + private Stream buildContainerStream(HddsProtos.ReplicationFactor factor, HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig) throws IOException { - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); + ReplicationConfig repConfig, + Stream containerStream) { + if (factor != null) { + containerStream = containerStream.filter(info -> info.getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> info.getReplicationFactor() == factor); + } else if (repConfig != null) { + // If we have repConfig filter by it, as it includes repType too. 
+ // Otherwise, we may have a filter just for repType, eg all EC containers + // without filtering on their replication scheme + containerStream = containerStream + .filter(info -> info.getReplicationConfig().equals(repConfig)); + } else if (replicationType != null) { + containerStream = containerStream.filter(info -> info.getReplicationType() == replicationType); + } + return containerStream; + } + + private Stream getBaseContainerStream(HddsProtos.LifeCycleState state) { + if (state != null) { + return scm.getContainerManager().getContainers(state).stream(); + } else { + return scm.getContainerManager().getContainers().stream(); + } + } + + private Map buildAuditMap(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) { + Map auditMap = new HashMap<>(); auditMap.put("startContainerID", String.valueOf(startContainerID)); auditMap.put("count", String.valueOf(count)); if (state != null) { auditMap.put("state", state.name()); } + if (factor != null) { + auditMap.put("factor", factor.name()); + } if (replicationType != null) { auditMap.put("replicationType", replicationType.toString()); } if (repConfig != null) { auditMap.put("replicationConfig", repConfig.toString()); } - try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state == null && replicationType == null && repConfig == null) { - // Not filters, so just return everything - return scm.getContainerManager().getContainers(containerId, count); - } - List containerList; - if (state != null) { - containerList = scm.getContainerManager().getContainers(state); - } else { - containerList = scm.getContainerManager().getContainers(); - } + return auditMap; + } - Stream containerStream = containerList.stream() - .filter(info -> info.containerID().getId() >= startContainerID); - // If we have repConfig filter by it, as it includes repType too. - // Otherwise, we may have a filter just for repType, eg all EC containers - // without filtering on their replication scheme - if (repConfig != null) { - containerStream = containerStream - .filter(info -> info.getReplicationConfig().equals(repConfig)); - } else if (replicationType != null) { - containerStream = containerStream - .filter(info -> info.getReplicationType() == replicationType); - } - return containerStream.sorted() - .limit(count) - .collect(Collectors.toList()); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); - } - } + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * @param state Container with this state will be returned. + * @param repConfig Replication Config for the container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. 
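The buildContainerStream helper above applies the filters with a fixed precedence: a legacy replication factor (which implicitly excludes EC containers) wins, otherwise an exact replication config match, otherwise a bare replication type. Below is a standalone sketch of that precedence using simplified stand-in types, not the real ContainerInfo or ReplicationConfig API.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Simplified stand-ins for ContainerInfo and its replication metadata.
final class FilterPrecedenceSketch {
  enum RepType { RATIS, EC }

  static final class Container {
    final RepType type;
    final String config;   // e.g. "RATIS/THREE" or "EC/rs-3-2-1024k"
    Container(RepType type, String config) {
      this.type = type;
      this.config = config;
    }
    @Override
    public String toString() {
      return config;
    }
  }

  // Mirrors the precedence in buildContainerStream: factor, then repConfig, then type.
  static Stream<Container> apply(Stream<Container> s, String factor, String repConfig, RepType type) {
    if (factor != null) {
      // Legacy factor-based listing: EC containers have no factor, so drop them.
      return s.filter(c -> c.type != RepType.EC).filter(c -> c.config.endsWith(factor));
    } else if (repConfig != null) {
      // A full replication config already implies the replication type.
      return s.filter(c -> c.config.equals(repConfig));
    } else if (type != null) {
      return s.filter(c -> c.type == type);
    }
    return s;
  }

  public static void main(String[] args) {
    List<Container> all = Arrays.asList(
        new Container(RepType.RATIS, "RATIS/THREE"),
        new Container(RepType.RATIS, "RATIS/ONE"),
        new Container(RepType.EC, "EC/rs-3-2-1024k"));
    System.out.println(apply(all.stream(), null, null, RepType.EC)
        .collect(Collectors.toList()));             // [EC/rs-3-2-1024k]
    System.out.println(apply(all.stream(), "THREE", null, null)
        .collect(Collectors.toList()));             // [RATIS/THREE]
  }
}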
+ * @throws IOException + */ + @Override + public ContainerListResult listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { + return listContainerInternal(startContainerID, count, state, null, replicationType, repConfig); } @Override @@ -841,6 +826,7 @@ public ScmInfo getScmInfo() { if (scm.getScmHAManager().getRatisServer() != null) { builder.setRatisPeerRoles( scm.getScmHAManager().getRatisServer().getRatisRoles()); + builder.setScmRatisEnabled(true); } else { // In case, there is no ratis, there is no ratis role. // This will just print the hostname with ratis port as the default @@ -848,6 +834,7 @@ public ScmInfo getScmInfo() { String address = scm.getSCMHANodeDetails().getLocalNodeDetails() .getRatisHostPortStr(); builder.setRatisPeerRoles(Arrays.asList(address)); + builder.setScmRatisEnabled(false); } return builder.build(); } catch (Exception ex) { @@ -1230,48 +1217,7 @@ public ContainerBalancerStatusInfoResponseProto getContainerBalancerStatusInfo() return ContainerBalancerStatusInfoResponseProto .newBuilder() .setIsRunning(true) - .setContainerBalancerStatusInfo(StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfo - .newBuilder() - .setStartedAt(balancerStatusInfo.getStartedAt().toEpochSecond()) - .setConfiguration(balancerStatusInfo.getConfiguration()) - .addAllIterationsStatusInfo( - balancerStatusInfo.getIterationsStatusInfo() - .stream() - .map( - info -> ContainerBalancerTaskIterationStatusInfo.newBuilder() - .setIterationNumber(info.getIterationNumber()) - .setIterationResult(Optional.ofNullable(info.getIterationResult()).orElse("")) - .setSizeScheduledForMoveGB(info.getSizeScheduledForMoveGB()) - .setDataSizeMovedGB(info.getDataSizeMovedGB()) - .setContainerMovesScheduled(info.getContainerMovesScheduled()) - .setContainerMovesCompleted(info.getContainerMovesCompleted()) - .setContainerMovesFailed(info.getContainerMovesFailed()) - .setContainerMovesTimeout(info.getContainerMovesTimeout()) - .addAllSizeEnteringNodesGB( - info.getSizeEnteringNodesGB().entrySet() - .stream() - .map(entry -> NodeTransferInfo.newBuilder() - .setUuid(entry.getKey().toString()) - .setDataVolumeGB(entry.getValue()) - .build() - ) - .collect(Collectors.toList()) - ) - .addAllSizeLeavingNodesGB( - info.getSizeLeavingNodesGB().entrySet() - .stream() - .map(entry -> NodeTransferInfo.newBuilder() - .setUuid(entry.getKey().toString()) - .setDataVolumeGB(entry.getValue()) - .build() - ) - .collect(Collectors.toList()) - ) - .build() - ) - .collect(Collectors.toList()) - ) - ) + .setContainerBalancerStatusInfo(balancerStatusInfo.toProto()) .build(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java index cca2df00374..5a4dc505d84 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -289,12 +289,12 @@ public interface ContainerReport { public enum ContainerReportType { /** * Incremental container report type - * {@liks IncrementalContainerReportFromDatanode}. + * {@link IncrementalContainerReportFromDatanode}. */ ICR, /** * Full container report type - * {@liks ContainerReportFromDatanode}. 
+ * {@link ContainerReportFromDatanode}. */ FCR } @@ -306,12 +306,20 @@ public static class ContainerReportFromDatanode extends ReportFromDatanode implements ContainerReport, IEventInfo { private long createTime = Time.monotonicNow(); + // Used to identify whether container reporting is from a registration. + private boolean isRegister = false; public ContainerReportFromDatanode(DatanodeDetails datanodeDetails, ContainerReportsProto report) { super(datanodeDetails, report); } + public ContainerReportFromDatanode(DatanodeDetails datanodeDetails, + ContainerReportsProto report, boolean isRegister) { + super(datanodeDetails, report); + this.isRegister = isRegister; + } + @Override public boolean equals(Object o) { return this == o; @@ -331,6 +339,10 @@ public long getCreateTime() { return createTime; } + public boolean isRegister() { + return isRegister; + } + @Override public String getEventId() { return getDatanodeDetails().toString() + ", {type: " + getType() diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index e74a83e394f..b230e3c12f7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -253,7 +253,7 @@ public SCMRegisteredResponseProto register( == SCMRegisteredResponseProto.ErrorCode.success) { eventPublisher.fireEvent(CONTAINER_REPORT, new SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode( - datanodeDetails, containerReportsProto)); + datanodeDetails, containerReportsProto, true)); eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, new NodeRegistrationContainerReport(datanodeDetails, containerReportsProto)); @@ -491,7 +491,6 @@ private static String flatten(String input) { /** * Get Key associated with Datanode address for this server. - * @return */ protected String getDatanodeAddressKey() { return this.scm.getScmNodeDetails().getDatanodeAddressKey(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java index 2b6fa032b53..5aaf4b7b485 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java @@ -70,9 +70,10 @@ public String getKerberosKeytab() { * This static class is required to support other classes * that reference the key names and also require attributes. * Example: SCMSecurityProtocol where the KerberosInfo references - * the old configuration with the annotation shown below:- - * @KerberosInfo(serverPrincipal = - * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) + * the old configuration with the annotation shown below: + *
    + * {@code KerberosInfo(serverPrincipal = + * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)} */ public static class ConfigStrings { public static final String HDDS_SCM_HTTP_AUTH_CONFIG_PREFIX = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java index de609356b22..f54ec30985b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.server; +import java.util.List; import java.util.Map; import org.apache.hadoop.hdds.annotation.InterfaceAudience; @@ -72,7 +73,7 @@ public interface SCMMXBean extends ServiceRuntimeInfo { String getClusterId(); - String getScmRatisRoles(); + List> getScmRatisRoles(); /** * Primordial node is the node on which scm init operation is performed. @@ -83,4 +84,11 @@ public interface SCMMXBean extends ServiceRuntimeInfo { String getRatisLogDirectory(); String getRocksDbDirectory(); + + /** + * Gets the SCM hostname. + * + * @return the SCM hostname for the datanode. + */ + String getHostname(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java index 88b3c887746..17318107e3d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java @@ -430,7 +430,6 @@ public String getCACertificate() throws IOException { * @param role - node role: OM/SCM/DN. * @param startSerialId - start certificate serial id. * @param count - max number of certificates returned in a batch. 
- * @return * @throws IOException */ @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 876c499113d..c993b933648 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -54,7 +54,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMPerformanceMetrics; import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; import org.apache.hadoop.hdds.scm.container.replication.DatanodeCommandCountUpdatedHandler; -import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager; import org.apache.hadoop.hdds.scm.ha.SCMServiceException; import org.apache.hadoop.hdds.scm.ha.BackgroundSCMService; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; @@ -172,6 +171,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.ReflectionUtils; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.RaftServer; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.JvmPauseMonitor; import org.slf4j.Logger; @@ -332,6 +332,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl private Clock systemClock; private DNSToSwitchMapping dnsToSwitchMapping; + private String scmHostName; + /** * Creates a new StorageContainerManager. Configuration will be * updated with information on the actual listening addresses used @@ -456,6 +458,7 @@ private StorageContainerManager(OzoneConfiguration conf, // Emit initial safe mode status, as now handlers are registered. 
scmSafeModeManager.emitSafeModeStatus(); + scmHostName = HddsUtils.getHostName(conf); registerMXBean(); registerMetricsSource(this); @@ -613,7 +616,8 @@ public OzoneConfiguration getConfiguration() { * @param conf HDDS configuration * @param configurator SCM configurator * @return SCM instance - * @throws IOException, AuthenticationException + * @throws IOException in case of I/O failure + * @throws AuthenticationException in case of authentication failure */ public static StorageContainerManager createSCM( OzoneConfiguration conf, SCMConfigurator configurator) @@ -626,7 +630,8 @@ public static StorageContainerManager createSCM( * * @param conf HDDS configuration * @return SCM instance - * @throws IOException, AuthenticationException + * @throws IOException in case of I/O failure + * @throws AuthenticationException in case of authentication failure */ public static StorageContainerManager createSCM(OzoneConfiguration conf) throws IOException, AuthenticationException { @@ -824,10 +829,6 @@ private void initializeSystemManagers(OzoneConfiguration conf, if (configurator.getReplicationManager() != null) { replicationManager = configurator.getReplicationManager(); } else { - LegacyReplicationManager legacyRM = new LegacyReplicationManager( - conf, containerManager, containerPlacementPolicy, eventQueue, - scmContext, scmNodeManager, scmHAManager, systemClock, - getScmMetadataStore().getMoveTable()); replicationManager = new ReplicationManager( conf, containerManager, @@ -837,7 +838,6 @@ private void initializeSystemManagers(OzoneConfiguration conf, scmContext, scmNodeManager, systemClock, - legacyRM, containerReplicaPendingOps); reconfigurationHandler.register(replicationManager.getConfig()); } @@ -1597,8 +1597,9 @@ public void start() throws IOException { setStartTime(); - // At this point leader is not known - scmHAMetricsUpdate(null); + RaftPeerId leaderId = SCMHAUtils.isSCMHAEnabled(configuration) + ? getScmHAManager().getRatisServer().getLeaderId() : null; + scmHAMetricsUpdate(Objects.toString(leaderId, null)); if (scmCertificateClient != null) { // In case root CA certificate is rotated during this SCM is offline @@ -1618,8 +1619,7 @@ private void persistSCMCertificates() throws IOException { if (primaryScmNodeId != null && !primaryScmNodeId.equals( scmStorageConfig.getScmId())) { List<String> pemEncodedCerts = - scmCertificateClient.listCA(); - + getScmSecurityClientWithMaxRetry(configuration, getCurrentUser()).listCACertificate(); // Write the primary SCM CA and Root CA during startup. for (String cert : pemEncodedCerts) { X509Certificate x509Certificate = CertificateCodec.getX509Certificate( @@ -2066,6 +2066,10 @@ public StatefulServiceStateManager getStatefulServiceStateManager() { return statefulServiceStateManager; } + @Override + public String getNamespace() { + return scmHANodeDetails.getLocalNodeDetails().getServiceId(); + } /** * Get the safe mode status of all rules. * @@ -2137,10 +2141,54 @@ public ContainerTokenGenerator getContainerTokenGenerator() { } @Override - public String getScmRatisRoles() { + public List<List<String>> getScmRatisRoles() { final SCMRatisServer server = getScmHAManager().getRatisServer(); - return server != null ? - HddsUtils.format(server.getRatisRoles()) : "STANDALONE"; + + // If Ratis is disabled + if (server == null) { + return getRatisRolesException("Ratis is disabled"); + } + + // Try to find the current SCM leader; + // if no leader is known yet, + // return a "no leader found" message.
+ RaftServer.Division division = server.getDivision(); + RaftPeerId leaderId = division.getInfo().getLeaderId(); + if (leaderId == null) { + return getRatisRolesException("No leader found"); + } + + // If the SCMRatisServer is stopped, return a service stopped message. + if (server.isStopped()) { + return getRatisRolesException("Server is shutting down"); + } + + // Attempt to retrieve role information. + try { + List<String> ratisRoles = server.getRatisRoles(); + List<List<String>> result = new ArrayList<>(); + for (String role : ratisRoles) { + String[] roleArr = role.split(":"); + List<String> scmInfo = new ArrayList<>(); + // Host Name + scmInfo.add(roleArr[0]); + // Node ID + scmInfo.add(roleArr[3]); + // Ratis Port + scmInfo.add(roleArr[1]); + // Role + scmInfo.add(roleArr[2]); + result.add(scmInfo); + } + return result; + } catch (Exception e) { + LOG.error("Failed to getRatisRoles.", e); + return getRatisRolesException("Exception occurred: " + e.getMessage()); + } + } + + private static List<List<String>> getRatisRolesException(String exceptionString) { + return Collections.singletonList(Collections.singletonList(exceptionString)); } /** @@ -2177,6 +2225,11 @@ public String getRocksDbDirectory() { return String.valueOf(ServerUtils.getScmDbDir(configuration)); } + @Override + public String getHostname() { + return scmHostName; + } + public Collection<String> getScmAdminUsernames() { return scmAdmins.getAdminUsernames(); } @@ -2253,7 +2306,6 @@ public void scmHAMetricsUpdate(String leaderId) { // unregister, in case metrics already exist // so that the metric tags will get updated. SCMHAMetrics.unRegister(); - scmHAMetrics = SCMHAMetrics.create(getScmId(), leaderId); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java index a3ea5189c3d..520a550e293 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java @@ -183,7 +183,6 @@ public void onLeaderReady() { /** * Builds a {@link FinalizationManagerImpl}.
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private OzoneConfiguration conf; private HDDSLayoutVersionManager versionManager; @@ -196,14 +195,14 @@ public Builder() { executor = new DefaultUpgradeFinalizationExecutor<>(); } - public Builder setConfiguration(OzoneConfiguration conf) { - this.conf = conf; + public Builder setConfiguration(OzoneConfiguration configuration) { + this.conf = configuration; return this; } public Builder setLayoutVersionManager( - HDDSLayoutVersionManager versionManager) { - this.versionManager = versionManager; + HDDSLayoutVersionManager layoutVersionManager) { + this.versionManager = layoutVersionManager; return this; } @@ -212,8 +211,8 @@ public Builder setStorage(SCMStorageConfig storage) { return this; } - public Builder setHAManager(SCMHAManager scmHAManager) { - this.scmHAManager = scmHAManager; + public Builder setHAManager(SCMHAManager haManager) { + this.scmHAManager = haManager; return this; } @@ -224,8 +223,8 @@ public Builder setFinalizationStore( } public Builder setFinalizationExecutor( - UpgradeFinalizationExecutor executor) { - this.executor = executor; + UpgradeFinalizationExecutor finalizationExecutor) { + this.executor = finalizationExecutor; return this; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java index 95c85a4c744..d166f8774f6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java @@ -80,7 +80,6 @@ public SCMStorageConfig getStorage() { /** * Builds an {@link SCMUpgradeFinalizationContext}. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private PipelineManager pipelineManager; private NodeManager nodeManager; @@ -120,13 +119,13 @@ public Builder setStorage(SCMStorageConfig storage) { } public Builder setLayoutVersionManager( - HDDSLayoutVersionManager versionManager) { - this.versionManager = versionManager; + HDDSLayoutVersionManager layoutVersionManager) { + this.versionManager = layoutVersionManager; return this; } - public Builder setConfiguration(OzoneConfiguration conf) { - this.conf = conf; + public Builder setConfiguration(OzoneConfiguration configuration) { + this.conf = configuration; return this; } diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 3f825d4e25f..7bfe405850e 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -110,6 +110,114 @@

[scm-overview.html markup not recoverable from extraction; the recoverable content of these hunks is summarized below.]
Space Statistics section: a "Namespace: {{$ctrl.jmx.Namespace}}" row is shown alongside "Started: {{$ctrl.jmx.StartedTimeInMillis | date : 'medium'}}".
New "Pipeline Statistics" table (Pipeline State / Size): Closed {{statistics.pipelines.closed}}, Allocated {{statistics.pipelines.allocated}}, Open {{statistics.pipelines.open}}, Dormant {{statistics.pipelines.dormant}}.
New "Container Statistics" tables: Operational State / Size rows for Open, Closing, Quasi Closed, Closed, Deleting, Deleted and Recovering ({{statistics.containers.lifecycle.*}}), and Health / Size rows for Under Replicated, Mis Replicated, Over Replicated, Missing, Unhealthy, Empty, Open Unhealthy, Quasi Closed Stuck and Open Without Pipeline ({{statistics.containers.health.*}}).
@@ -140,6 +248,10 @@ and @@ -157,6 +269,8 @@ Node Status table: "UUID" and "Version" columns are added, bound to {{typestat.uuid}} and {{typestat.version}}.
@@ -210,10 +324,6 @@ Status table: the single "SCM Roles (HA)" row showing {{$ctrl.overview.jmx.ScmRatisRoles}} is removed.
@@ -235,6 +345,35 @@ A new "SCM Roles (HA)" section is added after "Meta-Data Volume Information": it shows {{$ctrl.overview.jmx.ScmRatisRoles[0][0]}} when no role table is available, otherwise a table with Host Name, Node ID, Ratis Port and Role columns bound to {{roles[0]}} through {{roles[3]}}.
@@ -248,7 +387,7 @@ "Safemode rules statuses" table: one row's markup is changed (content not recoverable).
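The new "SCM Roles (HA)" table described above is populated from the reworked getScmRatisRoles() shown earlier, which flattens each Ratis role string into a [host name, node id, ratis port, role] row so the AngularJS template can bind the columns as roles[0] through roles[3] without named fields. The following standalone sketch is illustrative only, not code from this patch: it assumes, based on the index mapping in the patch, that each entry returned by SCMRatisServer#getRatisRoles() has the shape host:port:role:nodeId, and the class name and sample values are made up.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public final class ScmRoleRowSketch {

  /** Re-orders "host:port:role:nodeId" entries into [host, nodeId, port, role] rows, matching the UI columns. */
  static List<List<String>> toRows(List<String> ratisRoles) {
    List<List<String>> rows = new ArrayList<>();
    for (String role : ratisRoles) {
      String[] parts = role.split(":");
      rows.add(Arrays.asList(parts[0], parts[3], parts[1], parts[2]));
    }
    return rows;
  }

  public static void main(String[] args) {
    // Hypothetical peers; real values come from Ratis at runtime.
    toRows(Arrays.asList(
        "scm1.example.com:9894:LEADER:scm-node-1",
        "scm2.example.com:9894:FOLLOWER:scm-node-2"))
        .forEach(System.out::println);
  }
}

An error row (for example when Ratis is disabled or no leader is known) fits the same nested-list shape, which is why the patch returns Collections.singletonList(Collections.singletonList(message)) for those cases and the template renders ScmRatisRoles[0][0] directly.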

    - + diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js index 6fac6849530..eca79852e43 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js @@ -24,7 +24,7 @@ require: { overview: "^overview" }, - controller: function ($http,$scope) { + controller: function ($http,$scope,$sce) { var ctrl = this; $scope.reverse = false; $scope.columnName = "hostname"; @@ -53,9 +53,42 @@ remaining : "N/A", nonscmused : "N/A" } + }, + pipelines : { + closed : "N/A", + allocated : "N/A", + open : "N/A", + dormant : "N/A" + }, + containers : { + lifecycle : { + open : "N/A", + closing : "N/A", + quasi_closed : "N/A", + closed : "N/A", + deleting : "N/A", + deleted : "N/A", + recovering : "N/A" + }, + health : { + under_replicated : "N/A", + mis_replicated : "N/A", + over_replicated : "N/A", + missing : "N/A", + unhealthy : "N/A", + empty : "N/A", + open_unhealthy : "N/A", + quasi_closed_stuck : "N/A", + open_without_pipeline : "N/A" + } } } + $http.get("jmx?qry=Ratis:service=RaftServer,group=*,id=*") + .then(function (result) { + ctrl.role = result.data.beans[0]; + }); + function get_protocol(URLScheme, value, baseProto, fallbackProto) { let protocol = "unknown" let port = -1; @@ -95,6 +128,8 @@ capacity: value && value.find((element) => element.key === "CAPACITY").value, comstate: value && value.find((element) => element.key === "COMSTATE").value, lastheartbeat: value && value.find((element) => element.key === "LASTHEARTBEAT").value, + uuid: value && value.find((element) => element.key === "UUID").value, + version: value && value.find((element) => element.key === "VERSION").value, port: portSpec.port, protocol: portSpec.proto } @@ -105,6 +140,14 @@ $scope.lastIndex = Math.ceil(nodeStatusCopy.length / $scope.RecordsToDisplay); $scope.nodeStatus = nodeStatusCopy.slice(0, $scope.RecordsToDisplay); + $scope.formatValue = function(value) { + if (value && value.includes(';')) { + return $sce.trustAsHtml(value.replace('/;/g', '
    ')); + } else { + return $sce.trustAsHtml(value); + } + }; + ctrl.nodemanagermetrics.NodeStatistics.forEach(({key, value}) => { if(key == "Min") { $scope.statistics.nodes.usages.min = value; @@ -135,6 +178,46 @@ } }); }); + + $http.get("jmx?qry=Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo") + .then(function (result) { + const URLScheme = location.protocol.replace(":" , ""); + ctrl.scmpipelinemanager = result.data.beans[0]; + ctrl.scmpipelinemanager.PipelineInfo.forEach(({key, value}) => { + if(key == "CLOSED") { + $scope.statistics.pipelines.closed = value; + } else if(key == "ALLOCATED") { + $scope.statistics.pipelines.allocated = value; + } else if(key == "OPEN") { + $scope.statistics.pipelines.open = value; + } else if(key == "DORMANT") { + $scope.statistics.pipelines.dormant = value; + } + }); + }); + + $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=ReplicationManagerMetrics") + .then(function (result) { + const URLScheme = location.protocol.replace(":" , ""); + ctrl.scmcontainermanager = result.data.beans[0]; + $scope.statistics.containers.lifecycle.open = ctrl.scmcontainermanager.OpenContainers; + $scope.statistics.containers.lifecycle.closing = ctrl.scmcontainermanager.ClosingContainers; + $scope.statistics.containers.lifecycle.quasi_closed = ctrl.scmcontainermanager.QuasiClosedContainers; + $scope.statistics.containers.lifecycle.closed = ctrl.scmcontainermanager.ClosedContainers; + $scope.statistics.containers.lifecycle.deleting = ctrl.scmcontainermanager.DeletingContainers; + $scope.statistics.containers.lifecycle.deleted = ctrl.scmcontainermanager.DeletedContainers; + $scope.statistics.containers.lifecycle.recovering = ctrl.scmcontainermanager.RecoveringContainers; + $scope.statistics.containers.health.under_replicated = ctrl.scmcontainermanager.UnderReplicatedContainers; + $scope.statistics.containers.health.mis_replicated = ctrl.scmcontainermanager.MisReplicatedContainers; + $scope.statistics.containers.health.over_replicated = ctrl.scmcontainermanager.OverReplicatedContainers; + $scope.statistics.containers.health.missing = ctrl.scmcontainermanager.MissingContainers; + $scope.statistics.containers.health.unhealthy = ctrl.scmcontainermanager.UnhealthyContainers; + $scope.statistics.containers.health.empty = ctrl.scmcontainermanager.EmptyContainers; + $scope.statistics.containers.health.open_unhealthy = ctrl.scmcontainermanager.OpenUnhealthyContainers; + $scope.statistics.containers.health.quasi_closed_stuck = ctrl.scmcontainermanager.StuckQuasiClosedContainers; + $scope.statistics.containers.health.open_without_pipeline = ctrl.scmcontainermanager.OpenContainersWithoutPipeline; + }); + /*if option is 'All' display all records else display specified record on page*/ $scope.UpdateRecordsToShow = () => { if($scope.RecordsToDisplay == 'All') { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java index fe5459764c9..787f83e1a83 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java @@ -828,10 +828,36 @@ public static Pipeline getRandomPipeline() { */ public static List getContainerInfo(int numContainers) { List containerInfoList = new ArrayList<>(); + RatisReplicationConfig ratisReplicationConfig = + RatisReplicationConfig.getInstance(ReplicationFactor.THREE); for (int i = 0; i < numContainers; 
i++) { ContainerInfo.Builder builder = new ContainerInfo.Builder(); containerInfoList.add(builder .setContainerID(RandomUtils.nextLong()) + .setReplicationConfig(ratisReplicationConfig) + .build()); + } + return containerInfoList; + } + + /** + * Generate EC Container data. + * + * @param numContainers number of ContainerInfo to be included in list. + * @param data Data block Num. + * @param parity Parity block Num. + * @return {@literal List} + */ + public static List getECContainerInfo(int numContainers, int data, int parity) { + List containerInfoList = new ArrayList<>(); + ECReplicationConfig eCReplicationConfig = new ECReplicationConfig(data, parity); + for (int i = 0; i < numContainers; i++) { + ContainerInfo.Builder builder = new ContainerInfo.Builder(); + containerInfoList.add(builder + .setContainerID(RandomUtils.nextLong()) + .setOwner("test-owner") + .setPipelineID(PipelineID.randomId()) + .setReplicationConfig(eCReplicationConfig) .build()); } return containerInfoList; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 6438b6f8d49..621c9297e7e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -22,15 +22,15 @@ import java.time.Clock; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.ArrayList; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -39,30 +39,30 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; -import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; -import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; -import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; -import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.ha.SCMHAManager; -import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; import 
org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; +import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerImpl; +import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; @@ -76,21 +76,19 @@ import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand; import org.apache.ozone.test.GenericTestUtils; - -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.apache.hadoop.ozone.OzoneConsts.MB; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; - import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MB; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests for SCM Block Manager. 
@@ -273,7 +271,7 @@ void testAllocateBlockInParallel() throws Exception { } CompletableFuture - .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) + .allOf(futureList.toArray(new CompletableFuture[0])) .get(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 03500529ff9..2a012cbe180 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -265,7 +265,7 @@ private void commitTransactions( List transactionResults) throws IOException { commitTransactions(transactionResults, - dnList.toArray(new DatanodeDetails[3])); + dnList.toArray(new DatanodeDetails[0])); } private void commitTransactions( @@ -441,6 +441,75 @@ public void testResetCount() throws Exception { assertEquals(30 * THREE, blocks.size()); } + + @Test + public void testSCMDelIteratorProgress() throws Exception { + int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); + + // CASE1: When all transactions are valid and available + // Create 8 TXs in the log. + int noOfTransactions = 8; + addTransactions(generateData(noOfTransactions), true); + mockContainerHealthResult(true); + List blocks; + + List txIDs = new ArrayList<>(); + int i = 1; + while (i < noOfTransactions) { + // In each iteration read two transaction, API returns all the transactions in order. + // 1st iteration: {1, 2} + // 2nd iteration: {3, 4} + // 3rd iteration: {5, 6} + // 4th iteration: {7, 8} + blocks = getTransactions(2 * BLOCKS_PER_TXN * THREE); + assertEquals(blocks.get(0).getTxID(), i++); + assertEquals(blocks.get(1).getTxID(), i++); + } + + // CASE2: When some transactions are not available for delete in the current iteration, + // either due to max retry reach or some other issue. + // New transactions Id is { 9, 10, 11, 12, 13, 14, 15, 16} + addTransactions(generateData(noOfTransactions), true); + mockContainerHealthResult(true); + + // Mark transaction Id 11 as reached max retry count so that it will be ignored + // by scm deleting service while fetching transaction for delete + int ignoreTransactionId = 11; + txIDs.add((long) ignoreTransactionId); + for (i = 0; i < maxRetry; i++) { + incrementCount(txIDs); + } + incrementCount(txIDs); + + i = 9; + while (true) { + // In each iteration read two transaction. + // If any transaction which is not available for delete in the current iteration, + // it will be ignored and will be re-checked again only after complete table is read. + // 1st iteration: {9, 10} + // 2nd iteration: {12, 13} Transaction 11 is ignored here + // 3rd iteration: {14, 15} Transaction 11 is available here, + // but it will be read only when all db records are read till the end. + // 4th iteration: {16, 11} Since iterator reached at the end of table after reading transaction 16, + // Iterator starts from beginning again, and it returns transaction 11 as well + blocks = getTransactions(2 * BLOCKS_PER_TXN * THREE); + if (i == ignoreTransactionId) { + i++; + } + assertEquals(blocks.get(0).getTxID(), i++); + if (i == 17) { + assertEquals(blocks.get(1).getTxID(), ignoreTransactionId); + break; + } + assertEquals(blocks.get(1).getTxID(), i++); + + if (i == 14) { + // Reset transaction 11 so that it will be available in scm key deleting service in the subsequent iterations. 
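+ // Note: resetting the retry count does not make the iterator revisit transaction 11 right away; it is returned only after the iterator wraps around to the beginning of the table, as described for the 4th iteration above.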
+ resetCount(txIDs); + } + } + } + @Test public void testCommitTransactions() throws Exception { deletedBlockLog.setScmCommandTimeoutMs(Long.MAX_VALUE); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 83791c3257d..5e951a6d680 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -77,8 +77,7 @@ public class TestContainerManagerImpl { @BeforeEach void setUp() throws Exception { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); nodeManager = new MockNodeManager(true, 10); sequenceIdGen = new SequenceIdGenerator( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index f7a731fe117..94b9b2f3f2b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -100,8 +100,7 @@ void setup() throws IOException, InvalidStateTransitionException { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); nodeManager = new MockNodeManager(true, 10); containerManager = mock(ContainerManager.class); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java index a7043d02642..157a65c7014 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java @@ -75,8 +75,7 @@ public void init() throws IOException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); scmhaManager = SCMHAManagerStub.getInstance(true); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); pipelineManager = mock(PipelineManager.class); pipeline = Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED) .setId(PipelineID.randomId()) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index 9abbda81934..314cb02ad72 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -130,8 +130,7 @@ public void setup() throws IOException, InvalidStateTransitionException, new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap, scmContext, versionManager); scmhaManager = SCMHAManagerStub.getInstance(true); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); @@ -785,7 +784,7 @@ public void testECReplicaIndexValidation() throws NodeNotFoundException, IOException, TimeoutException { List dns = IntStream.range(0, 5) .mapToObj(i -> randomDatanodeDetails()).collect(Collectors.toList()); - dns.stream().forEach(dn -> nodeManager.register(dn, null, null)); + dns.forEach(dn -> nodeManager.register(dn, null, null)); ECReplicationConfig replicationConfig = new ECReplicationConfig(3, 2); final ContainerInfo container = getECContainer(LifeCycleState.CLOSED, PipelineID.randomId(), replicationConfig); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java index 9ea4ea45b56..a573573a67b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java @@ -78,8 +78,7 @@ public void setup() throws IOException { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); this.nodeManager = new MockNodeManager(true, 10); this.containerManager = mock(ContainerManager.class); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfigBuilder.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfigBuilder.java new file mode 100644 index 00000000000..fc4bc9fb05c --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfigBuilder.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +class ContainerBalancerConfigBuilder { + private static final int DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER = 15; + + private final ContainerBalancerConfiguration config; + + ContainerBalancerConfigBuilder(int nodeCount) { + this(new OzoneConfiguration(), nodeCount); + } + + ContainerBalancerConfigBuilder(OzoneConfiguration ozoneConfig, int nodeCount) { + config = ozoneConfig.getObject(ContainerBalancerConfiguration.class); + config.setIterations(1); + config.setThreshold(10); + config.setMaxSizeToMovePerIteration(50 * TestContainerBalancerTask.STORAGE_UNIT); + config.setMaxSizeEnteringTarget(50 * TestContainerBalancerTask.STORAGE_UNIT); + if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + } + } + + ContainerBalancerConfiguration build() { + return config; + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java index a3ec55d5863..d453fb6ca81 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java @@ -86,7 +86,7 @@ public MockedSCM(@Nonnull TestableCluster testableCluster) { } } - private void init(@Nonnull ContainerBalancerConfiguration balancerConfig, @Nonnull OzoneConfiguration ozoneCfg) { + void init(@Nonnull ContainerBalancerConfiguration balancerConfig, @Nonnull OzoneConfiguration ozoneCfg) { ozoneCfg.setFromObject(balancerConfig); try { doMock(balancerConfig, ozoneCfg); @@ -137,17 +137,36 @@ public String toString() { return task; } + public @Nonnull ContainerBalancerTask startBalancerTaskAsync( + @Nonnull ContainerBalancer containerBalancer, + @Nonnull ContainerBalancerConfiguration config, + Boolean withDelay) { + ContainerBalancerTask task = new ContainerBalancerTask(scm, 0, containerBalancer, + containerBalancer.getMetrics(), config, withDelay); + new Thread(task).start(); + return task; + } + public @Nonnull ContainerBalancerTask startBalancerTask(@Nonnull ContainerBalancerConfiguration config) { init(config, new OzoneConfiguration()); return startBalancerTask(new ContainerBalancer(scm), config); } - public void enableLegacyReplicationManager() { - mockedReplicaManager.conf.setEnableLegacy(true); + public @Nonnull ContainerBalancerTask startBalancerTaskAsync(@Nonnull ContainerBalancerConfiguration config, + OzoneConfiguration ozoneConfig, + Boolean withDelay) { + init(config, ozoneConfig); + return startBalancerTaskAsync(new ContainerBalancer(scm), config, withDelay); } - public void disableLegacyReplicationManager() { - mockedReplicaManager.conf.setEnableLegacy(false); + public @Nonnull ContainerBalancerTask startBalancerTaskAsync(@Nonnull ContainerBalancerConfiguration config, + Boolean withDelay) { + init(config, new OzoneConfiguration()); + return startBalancerTaskAsync(new ContainerBalancer(scm), config, withDelay); + } + + public int getNodeCount() { + return cluster.getNodeCount(); } public @Nonnull MoveManager getMoveManager() { @@ -235,9 +254,6 @@ private static final class MockedReplicationManager { private MockedReplicationManager() { manager = mock(ReplicationManager.class); conf = new ReplicationManager.ReplicationManagerConfiguration(); - // Disable 
LegacyReplicationManager. This means balancer should select RATIS as well as - // EC containers for balancing. Also, MoveManager will be used. - conf.setEnableLegacy(false); } private static @Nonnull MockedReplicationManager doMock() @@ -252,13 +268,6 @@ private MockedReplicationManager() { .when(mockedManager.manager.isContainerReplicatingOrDeleting(Mockito.any(ContainerID.class))) .thenReturn(false); - Mockito - .when(mockedManager.manager.move( - Mockito.any(ContainerID.class), - Mockito.any(DatanodeDetails.class), - Mockito.any(DatanodeDetails.class))) - .thenReturn(CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED)); - Mockito .when(mockedManager.manager.getClock()) .thenReturn(Clock.system(ZoneId.systemDefault())); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java index 662322b42f5..c7792887471 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java @@ -44,6 +44,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrowsExactly; import static org.junit.jupiter.api.Assertions.assertSame; @@ -257,6 +258,22 @@ public void testDelayedStartOnSCMStatusChange() stopBalancer(); } + @Test + public void testGetBalancerStatusInfo() throws Exception { + startBalancer(balancerConfiguration); + assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus()); + + // Assert the configuration fields that were explicitly set + ContainerBalancerStatusInfo status = containerBalancer.getBalancerStatusInfo(); + assertEquals(balancerConfiguration.getThreshold(), + Double.parseDouble(status.getConfiguration().getUtilizationThreshold())); + assertEquals(balancerConfiguration.getIterations(), status.getConfiguration().getIterations()); + assertEquals(balancerConfiguration.getTriggerDuEnable(), status.getConfiguration().getTriggerDuBeforeMoveEnable()); + + stopBalancer(); + assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus()); + } + private void startBalancer(ContainerBalancerConfiguration config) throws IllegalContainerBalancerStateException, IOException, InvalidContainerBalancerConfigurationException, TimeoutException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java index 7a8f655f067..2e44c3b4a5d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java @@ -73,7 +73,6 @@ */ public class TestContainerBalancerDatanodeNodeLimit { private static final long STORAGE_UNIT = OzoneConsts.GB; - private 
static final int DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER = 15; @BeforeAll public static void setup() { @@ -103,13 +102,8 @@ private static Stream createMockedSCMs() { @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + int nodeCount = mockedSCM.getNodeCount(); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(nodeCount).build(); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); ContainerBalancerMetrics metrics = task.getMetrics(); @@ -129,12 +123,8 @@ public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit(@Nonnull Mocke public void balancerShouldObeyMaxSizeEnteringTargetLimit(@Nonnull MockedSCM mockedSCM) { OzoneConfiguration ozoneConfig = new OzoneConfiguration(); ozoneConfig.set("ozone.scm.container.size", "1MB"); - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig); - if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + int nodeCount = mockedSCM.getNodeCount(); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(ozoneConfig, nodeCount).build(); // No containers should be selected when the limit is just 2 MB. config.setMaxSizeEnteringTarget(2 * OzoneConsts.MB); @@ -147,11 +137,7 @@ public void balancerShouldObeyMaxSizeEnteringTargetLimit(@Nonnull MockedSCM mock assertEquals(0, task.getSizeScheduledForMoveInLatestIteration()); // Some containers should be selected when using default values. - ContainerBalancerConfiguration balancerConfig = balancerConfigByOzoneConfig(new OzoneConfiguration()); - if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - balancerConfig.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - balancerConfig.setIterations(1); + ContainerBalancerConfiguration balancerConfig = new ContainerBalancerConfigBuilder(nodeCount).build(); task = mockedSCM.startBalancerTask(balancerConfig); // Balancer should have identified unbalanced nodes. @@ -167,13 +153,9 @@ public void balancerShouldObeyMaxSizeEnteringTargetLimit(@Nonnull MockedSCM mock public void balancerShouldObeyMaxSizeLeavingSourceLimit(@Nonnull MockedSCM mockedSCM) { OzoneConfiguration ozoneConfig = new OzoneConfiguration(); ozoneConfig.set("ozone.scm.container.size", "1MB"); - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig); - if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + int nodeCount = mockedSCM.getNodeCount(); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(ozoneConfig, nodeCount).build(); + // No source containers should be selected when the limit is just 2 MB. 
config.setMaxSizeLeavingSource(2 * OzoneConsts.MB); @@ -186,13 +168,9 @@ public void balancerShouldObeyMaxSizeLeavingSourceLimit(@Nonnull MockedSCM mocke assertEquals(0, task.getSizeScheduledForMoveInLatestIteration()); // Some containers should be selected when using default values. - ContainerBalancerConfiguration newBalancerConfig = balancerConfigByOzoneConfig(new OzoneConfiguration()); - if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - newBalancerConfig.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - newBalancerConfig.setIterations(1); + ContainerBalancerConfiguration balancerConfig = new ContainerBalancerConfigBuilder(ozoneConfig, nodeCount).build(); - task = mockedSCM.startBalancerTask(newBalancerConfig); + task = mockedSCM.startBalancerTask(balancerConfig); // Balancer should have identified unbalanced nodes. assertTrue(stillHaveUnbalancedNodes(task)); // ContainerToSourceMap is not empty due to some containers should be selected @@ -208,18 +186,10 @@ public void balancerShouldObeyMaxSizeLeavingSourceLimit(@Nonnull MockedSCM mocke @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void initializeIterationShouldUpdateUnBalancedNodesWhenThresholdChanges(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setThreshold(10); - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); // check for random threshold values - for (int i = 0; i < 50; i++) { + for (int i = 0; i < 10; i++) { double randomThreshold = RANDOM.nextDouble() * 100; List expectedUnBalancedNodes = mockedSCM.getCluster().getUnBalancedNodes(randomThreshold); config.setThreshold(randomThreshold); @@ -255,39 +225,19 @@ public void testCalculationOfUtilization(@Nonnull MockedSCM mockedSCM) { @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void testBalancerWithMoveManager(@Nonnull MockedSCM mockedSCM) - throws IOException, NodeNotFoundException, TimeoutException { - ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setThreshold(10); - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + throws IOException, NodeNotFoundException { + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); - mockedSCM.disableLegacyReplicationManager(); mockedSCM.startBalancerTask(config); verify(mockedSCM.getMoveManager(), atLeastOnce()). 
move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class)); - - verify(mockedSCM.getReplicationManager(), times(0)) - .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class)); } @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setThreshold(10); - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); config.setThreshold(99.99); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -302,14 +252,8 @@ public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced(@Nonnull Mocke public void testMetrics(@Nonnull MockedSCM mockedSCM) throws IOException, NodeNotFoundException { OzoneConfiguration ozoneConfig = new OzoneConfiguration(); ozoneConfig.set("hdds.datanode.du.refresh.period", "1ms"); - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); config.setBalancingInterval(Duration.ofMillis(2)); - config.setThreshold(10); - config.setIterations(1); config.setMaxSizeEnteringTarget(6 * STORAGE_UNIT); // deliberately set max size per iteration to a low value, 6 GB config.setMaxSizeToMovePerIteration(6 * STORAGE_UNIT); @@ -338,15 +282,7 @@ public void testMetrics(@Nonnull MockedSCM mockedSCM) throws IOException, NodeNo @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void containerBalancerShouldSelectOnlyClosedContainers(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); Map cidToInfoMap = mockedSCM.getCluster().getCidToInfoMap(); // Make all containers open, balancer should not select any of them @@ -380,15 +316,7 @@ public void containerBalancerShouldSelectOnlyClosedContainers(@Nonnull MockedSCM @MethodSource("createMockedSCMs") public void balancerShouldNotSelectNonClosedContainerReplicas(@Nonnull MockedSCM mockedSCM) throws ContainerNotFoundException { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - 
config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); // Let's mock such that all replicas have CLOSING state Map> cidToReplicasMap = mockedSCM.getCluster().getCidToReplicasMap(); @@ -418,12 +346,7 @@ public void balancerShouldNotSelectNonClosedContainerReplicas(@Nonnull MockedSCM @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void containerBalancerShouldObeyMaxSizeToMoveLimit(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); config.setThreshold(1); config.setMaxSizeToMovePerIteration(10 * STORAGE_UNIT); config.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); @@ -441,15 +364,7 @@ public void containerBalancerShouldObeyMaxSizeToMoveLimit(@Nonnull MockedSCM moc @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void targetDatanodeShouldNotAlreadyContainSelectedContainer(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -468,15 +383,7 @@ public void targetDatanodeShouldNotAlreadyContainSelectedContainer(@Nonnull Mock @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void containerMoveSelectionShouldFollowPlacementPolicy(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -511,15 +418,7 @@ public void containerMoveSelectionShouldFollowPlacementPolicy(@Nonnull MockedSCM @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void targetDatanodeShouldBeInServiceHealthy(@Nonnull MockedSCM mockedSCM) throws NodeNotFoundException { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = 
mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -534,51 +433,25 @@ public void targetDatanodeShouldBeInServiceHealthy(@Nonnull MockedSCM mockedSCM) @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void selectedContainerShouldNotAlreadyHaveBeenSelected(@Nonnull MockedSCM mockedSCM) - throws NodeNotFoundException, ContainerNotFoundException, TimeoutException, ContainerReplicaNotFoundException { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); - - mockedSCM.enableLegacyReplicationManager(); - - ContainerBalancerTask task = mockedSCM.startBalancerTask(config); - int numContainers = task.getContainerToTargetMap().size(); + throws NodeNotFoundException, ContainerNotFoundException, ContainerReplicaNotFoundException { + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); /* Assuming move is called exactly once for each unique container, number of calls to move should equal number of unique containers. If number of calls to move is more than number of unique containers, at least one container has been re-selected. It's expected that number of calls to move should equal number of unique, selected containers (from containerToTargetMap). */ - verify(mockedSCM.getReplicationManager(), times(numContainers)) - .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class)); - - // Try the same test by disabling LegacyReplicationManager so that MoveManager is used. 
- mockedSCM.disableLegacyReplicationManager(); ContainerBalancerTask nextTask = mockedSCM.startBalancerTask(config); - numContainers = nextTask.getContainerToTargetMap().size(); + int numContainers = nextTask.getContainerToTargetMap().size(); verify(mockedSCM.getMoveManager(), times(numContainers)) - .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class)); + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class)); } @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void balancerShouldNotSelectConfiguredExcludeContainers(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); config.setExcludeContainers("1, 4, 5"); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -589,6 +462,108 @@ public void balancerShouldNotSelectConfiguredExcludeContainers(@Nonnull MockedSC } } + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void checkIterationResult(@Nonnull MockedSCM mockedSCM) { + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); + config.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); + config.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + // According to the setup and configurations, this iteration's result should be ITERATION_COMPLETED. + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + + // Now, limit maxSizeToMovePerIteration but fail all container moves. + // The result should still be ITERATION_COMPLETED. + config.setMaxSizeToMovePerIteration(10 * STORAGE_UNIT); + + task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + + //Try the same but use MoveManager for container move instead of legacy RM. + task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + } + + /** + * Tests the situation where some container moves time out because they take longer than "move.timeout". 
+ */ + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void checkIterationResultTimeout(@Nonnull MockedSCM mockedSCM) + throws NodeNotFoundException, ContainerNotFoundException, TimeoutException, ContainerReplicaNotFoundException { + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); + config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + config.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + config.setMoveTimeout(Duration.ofMillis(50)); + + CompletableFuture completedFuture = + CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED); + /* + The first move being 10ms falls within the timeout duration of 500ms. It should be successful. The rest should fail. + */ + when(mockedSCM.getMoveManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(completedFuture) + .thenAnswer(invocation -> genCompletableFuture(150)); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + assertEquals(1, task.getMetrics().getNumContainerMovesCompletedInLatestIteration()); + assertThat(task.getMetrics().getNumContainerMovesTimeoutInLatestIteration()).isGreaterThanOrEqualTo(1); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void checkIterationResultTimeoutFromReplicationManager(@Nonnull MockedSCM mockedSCM) + throws NodeNotFoundException, ContainerNotFoundException, TimeoutException, ContainerReplicaNotFoundException { + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); + config.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); + config.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + config.setMoveTimeout(Duration.ofMillis(500)); + + CompletableFuture future = + CompletableFuture.supplyAsync(() -> MoveManager.MoveResult.REPLICATION_FAIL_TIME_OUT); + CompletableFuture future2 = + CompletableFuture.supplyAsync(() -> MoveManager.MoveResult.DELETION_FAIL_TIME_OUT); + + when(mockedSCM.getMoveManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(future).thenAnswer(invocation -> future2); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + assertThat(task.getMetrics().getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(0); + assertEquals(0, task.getMetrics().getNumContainerMovesCompletedInLatestIteration()); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + @Flaky("HDDS-11855") + public void checkIterationResultException(@Nonnull MockedSCM mockedSCM) + throws NodeNotFoundException, ContainerNotFoundException, TimeoutException, ContainerReplicaNotFoundException { + int nodeCount = mockedSCM.getNodeCount(); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); + config.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); + config.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + config.setMoveTimeout(Duration.ofMillis(500)); + + CompletableFuture future = new CompletableFuture<>(); + future.completeExceptionally(new RuntimeException("Runtime Exception")); + + int expectedMovesFailed = (nodeCount > 6) ? 
+ // Make the moves fail in three different ways: a future that completes exceptionally after doing some work, + // a thrown ContainerNotFoundException, and a future that is already completed exceptionally. + when(mockedSCM.getMoveManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(genCompletableFutureWithException(1)) + .thenThrow(new ContainerNotFoundException("Test Container not found")) + .thenReturn(future); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + assertThat(task.getMetrics().getNumContainerMovesFailed()).isGreaterThanOrEqualTo(expectedMovesFailed); + } + public static List getUnBalancedNodes(@Nonnull ContainerBalancerTask task) { ArrayList result = new ArrayList<>(); result.addAll(task.getOverUtilizedNodes()); @@ -604,9 +579,24 @@ private static boolean stillHaveUnbalancedNodes(@Nonnull ContainerBalancerTask t return new MockedSCM(new TestableCluster(datanodeCount, STORAGE_UNIT)); } - private static @Nonnull ContainerBalancerConfiguration balancerConfigByOzoneConfig( - @Nonnull OzoneConfiguration ozoneConfiguration - ) { - return ozoneConfiguration.getObject(ContainerBalancerConfiguration.class); + private static CompletableFuture genCompletableFuture(int sleepMilSec) { + return CompletableFuture.supplyAsync(() -> { + try { + Thread.sleep(sleepMilSec); + } catch (InterruptedException e) { + e.printStackTrace(); + } + return MoveManager.MoveResult.COMPLETED; + }); + } + + private static CompletableFuture genCompletableFutureWithException(int sleepMilSec) { + return CompletableFuture.supplyAsync(() -> { + try { + Thread.sleep(sleepMilSec); + } catch (Exception ignored) { + } + throw new RuntimeException("Runtime Exception after doing work"); + }); + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java index b8ac648e844..e2d3003af07 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java @@ -18,14 +18,23 @@ package org.apache.hadoop.hdds.scm.container.balancer; +import org.apache.commons.math3.util.ArithmeticUtils; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.ozone.test.LambdaTestUtils; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.List; +import java.util.Map; +import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -45,14 +54,210 @@ void testGetIterationStatistics() { ContainerBalancerTask task = mockedScm.startBalancerTask(config); List iterationStatistics = task.getCurrentIterationsStatistic(); - assertEquals(3, iterationStatistics.size()); - iterationStatistics.forEach(is -> { - assertTrue(is.getContainerMovesCompleted() > 0); - assertEquals(0, is.getContainerMovesFailed()); - assertEquals(0, 
is.getContainerMovesTimeout()); - assertFalse(is.getSizeEnteringNodesGB().isEmpty()); - assertFalse(is.getSizeLeavingNodesGB().isEmpty()); + assertEquals(2, iterationStatistics.size()); + + ContainerBalancerTaskIterationStatusInfo iterationHistory1 = iterationStatistics.get(0); + verifyCompletedIteration(iterationHistory1, 1); + + ContainerBalancerTaskIterationStatusInfo iterationHistory2 = iterationStatistics.get(1); + verifyCompletedIteration(iterationHistory2, 2); + } + + @Test + void testReRequestIterationStatistics() throws Exception { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + config.setBalancingInterval(0); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + + ContainerBalancerTask task = mockedScm.startBalancerTask(config); + List firstRequestIterationStatistics = + task.getCurrentIterationsStatistic(); + Thread.sleep(1000L); + List secondRequestIterationStatistics = + task.getCurrentIterationsStatistic(); + assertEquals(firstRequestIterationStatistics.get(0), secondRequestIterationStatistics.get(0)); + assertEquals(firstRequestIterationStatistics.get(1), secondRequestIterationStatistics.get(1)); + } + + @Test + void testGetCurrentStatisticsRequestInPeriodBetweenIterations() throws Exception { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + config.setBalancingInterval(10000); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + + ContainerBalancerTask task = mockedScm.startBalancerTaskAsync(config, false); + LambdaTestUtils.await(5000, 10, + () -> task.getCurrentIterationsStatistic().size() == 1 && + "ITERATION_COMPLETED".equals(task.getCurrentIterationsStatistic().get(0).getIterationResult())); + List iterationsStatic = task.getCurrentIterationsStatistic(); + assertEquals(1, iterationsStatic.size()); + + ContainerBalancerTaskIterationStatusInfo firstIteration = iterationsStatic.get(0); + verifyCompletedIteration(firstIteration, 1); + } + + @Test + void testCurrentStatisticsDoesntChangeWhenReRequestInPeriodBetweenIterations() throws InterruptedException { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + config.setBalancingInterval(10000); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + + ContainerBalancerTask task = mockedScm.startBalancerTaskAsync(config, false); + // Delay in finishing the first iteration + Thread.sleep(1000L); + List firstRequestIterationStatistics = + task.getCurrentIterationsStatistic(); + // Delay occurred for some time during the period between iterations. 
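Some of these tests wait with a fixed Thread.sleep, while others poll a condition under a bounded deadline (the LambdaTestUtils.await calls above). A rough, hand-rolled equivalent of that polling wait is sketched here; PollingWait and its parameter choices are illustrative and are not the org.apache.ozone.test.LambdaTestUtils API.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class PollingWait {

  private PollingWait() {
  }

  // Re-checks the condition every intervalMillis until it holds or timeoutMillis elapses.
  public static void await(long timeoutMillis, long intervalMillis, BooleanSupplier condition)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(intervalMillis);
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // Example: wait (at most 1 s, checking every 10 ms) until roughly 200 ms have passed.
    await(1000, 10, () -> System.currentTimeMillis() - start >= 200);
    System.out.println("condition met after ~" + (System.currentTimeMillis() - start) + " ms");
  }
}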
+ Thread.sleep(1000L); + List secondRequestIterationStatistics = + task.getCurrentIterationsStatistic(); + assertEquals(1, firstRequestIterationStatistics.size()); + assertEquals(1, secondRequestIterationStatistics.size()); + assertEquals(firstRequestIterationStatistics.get(0), secondRequestIterationStatistics.get(0)); + } + + @Test + void testGetCurrentStatisticsWithDelay() throws Exception { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + config.setBalancingInterval(0); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "1"); + ContainerBalancerTask task = mockedScm.startBalancerTaskAsync(config, configuration, true); + // Delay in finishing the first iteration + LambdaTestUtils.await(1100, 1, () -> task.getCurrentIterationsStatistic().size() == 1); + List iterationsStatic = task.getCurrentIterationsStatistic(); + assertEquals(1, iterationsStatic.size()); + ContainerBalancerTaskIterationStatusInfo currentIteration = iterationsStatic.get(0); + verifyStartedEmptyIteration(currentIteration); + } + + @Test + void testGetCurrentStatisticsWhileBalancingInProgress() throws Exception { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(3); + config.setBalancingInterval(0); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + + ContainerBalancerTask task = mockedScm.startBalancerTaskAsync(config, false); + // Get the current iteration statistics when it has information about the containers moving. + LambdaTestUtils.await(5000, 1, + () -> task.getCurrentIterationsStatistic().size() == 2 && + task.getCurrentIterationsStatistic().get(1).getContainerMovesScheduled() > 0); + List iterationsStatic = task.getCurrentIterationsStatistic(); + assertEquals(2, iterationsStatic.size()); + ContainerBalancerTaskIterationStatusInfo currentIteration = iterationsStatic.get(1); + assertCurrentIterationStatisticWhileBalancingInProgress(currentIteration); + } + + private static void assertCurrentIterationStatisticWhileBalancingInProgress( + ContainerBalancerTaskIterationStatusInfo iterationsStatic + ) { + // No need to check the other iterationsStatic fields (e.g. the '*ContainerMoves*' counters); checking them while balancing is still in progress can lead to flaky results. 
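For a completed iteration, by contrast, the per-node byte maps are stable and their totals must agree: the bytes entering target nodes equal the bytes leaving source nodes, which verifyCompletedIteration and getTotalMovedData below check with overflow-safe addition. A self-contained sketch of that invariant, using java.util.Map and Math.addExact instead of commons-math ArithmeticUtils.addAndCheck, follows.

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

public class MoveAccountingSketch {

  // Sums per-node byte counts, failing fast on long overflow.
  static long totalBytes(Map<UUID, Long> perNode) {
    return perNode.values().stream().reduce(0L, Math::addExact);
  }

  public static void main(String[] args) {
    Map<UUID, Long> entering = new HashMap<>();
    Map<UUID, Long> leaving = new HashMap<>();
    UUID target = UUID.randomUUID();
    UUID source = UUID.randomUUID();

    // One container of ~3 GB moved from source to target.
    long moved = 3L * 1024 * 1024 * 1024;
    entering.put(target, moved);
    leaving.put(source, moved);

    // The data entering targets should equal the data leaving sources.
    if (totalBytes(entering) != totalBytes(leaving)) {
      throw new AssertionError("entering/leaving byte totals diverge");
    }
    System.out.println("balanced: " + totalBytes(entering) + " bytes moved");
  }
}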
+ assertEquals(2, iterationsStatic.getIterationNumber()); + assertNull(iterationsStatic.getIterationResult()); + assertEquals(0, iterationsStatic.getContainerMovesFailed()); + assertEquals(0, iterationsStatic.getContainerMovesTimeout()); + iterationsStatic.getSizeEnteringNodes().forEach((id, size) -> { + assertNotNull(id); + assertTrue(size > 0); }); + iterationsStatic.getSizeLeavingNodes().forEach((id, size) -> { + assertNotNull(id); + assertTrue(size > 0); + }); + } + + private void verifyCompletedIteration( + ContainerBalancerTaskIterationStatusInfo iteration, + Integer expectedIterationNumber + ) { + assertEquals(expectedIterationNumber, iteration.getIterationNumber()); + assertEquals("ITERATION_COMPLETED", iteration.getIterationResult()); + assertNotNull(iteration.getIterationDuration()); + assertTrue(iteration.getContainerMovesScheduled() > 0); + assertTrue(iteration.getContainerMovesCompleted() > 0); + assertEquals(0, iteration.getContainerMovesFailed()); + assertEquals(0, iteration.getContainerMovesTimeout()); + assertTrue(iteration.getSizeScheduledForMove() > 0); + assertTrue(iteration.getDataSizeMoved() > 0); + assertFalse(iteration.getSizeEnteringNodes().isEmpty()); + assertFalse(iteration.getSizeLeavingNodes().isEmpty()); + iteration.getSizeEnteringNodes().forEach((id, size) -> { + assertNotNull(id); + assertTrue(size > 0); + }); + iteration.getSizeLeavingNodes().forEach((id, size) -> { + assertNotNull(id); + assertTrue(size > 0); + }); + Long enteringDataSum = getTotalMovedData(iteration.getSizeEnteringNodes()); + Long leavingDataSum = getTotalMovedData(iteration.getSizeLeavingNodes()); + assertEquals(enteringDataSum, leavingDataSum); + } + + private void verifyStartedEmptyIteration( + ContainerBalancerTaskIterationStatusInfo iteration + ) { + assertEquals(1, iteration.getIterationNumber()); + assertNull(iteration.getIterationResult()); + assertNotNull(iteration.getIterationDuration()); + assertEquals(0, iteration.getContainerMovesScheduled()); + assertEquals(0, iteration.getContainerMovesCompleted()); + assertEquals(0, iteration.getContainerMovesFailed()); + assertEquals(0, iteration.getContainerMovesTimeout()); + assertEquals(0, iteration.getSizeScheduledForMove()); + assertEquals(0, iteration.getDataSizeMoved()); + assertTrue(iteration.getSizeEnteringNodes().isEmpty()); + assertTrue(iteration.getSizeLeavingNodes().isEmpty()); + } + + private static Long getTotalMovedData(Map iteration) { + return iteration.values().stream().reduce(0L, ArithmeticUtils::addAndCheck); + } + + /** + * @see HDDS-11350 + */ + @Test + void testGetCurrentIterationsStatisticDoesNotThrowNullPointerExceptionWhenBalancingThreadIsSleeping() { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(10, OzoneConsts.GB)); + OzoneConfiguration ozoneConfig = new OzoneConfiguration(); + ContainerBalancerConfiguration config = ozoneConfig.getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + // the following config makes the balancing thread go to sleep while waiting for DU to be triggered in DNs and + // updated storage reports to arrive via DN heartbeats - of course, this is a unit test and NodeManager, DNs etc. 
+ // are all mocked + config.setTriggerDuEnable(true); + mockedScm.init(config, ozoneConfig); + // run ContainerBalancerTask in a new thread and have the current thread call getCurrentIterationsStatistic + StorageContainerManager scm = mockedScm.getStorageContainerManager(); + ContainerBalancer cb = new ContainerBalancer(scm); + ContainerBalancerTask task = new ContainerBalancerTask(scm, 0, cb, cb.getMetrics(), config, false); + Thread thread = new Thread(task); + thread.setDaemon(true); + thread.start(); + Assertions.assertDoesNotThrow(task::getCurrentIterationsStatistic); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index d0e9cd53fec..ff1ff4f32c3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory; @@ -60,7 +59,6 @@ import java.io.IOException; import java.time.Clock; -import java.time.Duration; import java.time.ZoneId; import java.util.ArrayList; import java.util.HashMap; @@ -73,16 +71,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotSame; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.anyString; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -118,7 +115,7 @@ public class TestContainerBalancerTask { private static final ThreadLocalRandom RANDOM = ThreadLocalRandom.current(); private StatefulServiceStateManager serviceStateManager; - private static final long STORAGE_UNIT = OzoneConsts.GB; + static final long STORAGE_UNIT = OzoneConsts.GB; /** * Sets up configuration values and creates a mock cluster. @@ -139,12 +136,7 @@ public void setup(TestInfo testInfo) throws IOException, NodeNotFoundException, .thenReturn(CompletableFuture.completedFuture( MoveManager.MoveResult.COMPLETED)); - /* - Disable LegacyReplicationManager. This means balancer should select RATIS - as well as EC containers for balancing. 
Also, MoveManager will be used. - */ when(replicationManager.getConfig()).thenReturn(rmConf); - rmConf.setEnableLegacy(false); // these configs will usually be specified in each test balancerConfiguration = conf.getObject(ContainerBalancerConfiguration.class); @@ -178,12 +170,6 @@ public void setup(TestInfo testInfo) throws IOException, NodeNotFoundException, .isContainerReplicatingOrDeleting(any(ContainerID.class))) .thenReturn(false); - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(CompletableFuture. - completedFuture(MoveManager.MoveResult.COMPLETED)); - when(replicationManager.getClock()) .thenReturn(Clock.system(ZoneId.systemDefault())); @@ -337,229 +323,6 @@ public void testContainerBalancerConfiguration() { cbConf.getMoveReplicationTimeout().toMinutes()); } - @Test - public void checkIterationResult() - throws NodeNotFoundException, IOException, - IllegalContainerBalancerStateException, - InvalidContainerBalancerConfigurationException, - TimeoutException { - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - rmConf.setEnableLegacy(true); - - startBalancer(balancerConfiguration); - - /* - According to the setup and configurations, this iteration's result should - be ITERATION_COMPLETED. - */ - assertEquals( - ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - stopBalancer(); - - /* - Now, limit maxSizeToMovePerIteration but fail all container moves. The - result should still be ITERATION_COMPLETED. - */ - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(CompletableFuture.completedFuture( - MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY)); - balancerConfiguration.setMaxSizeToMovePerIteration(10 * STORAGE_UNIT); - - startBalancer(balancerConfiguration); - - assertEquals( - ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - stopBalancer(); - - /* - Try the same but use MoveManager for container move instead of legacy RM. - */ - rmConf.setEnableLegacy(false); - startBalancer(balancerConfiguration); - assertEquals( - ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - stopBalancer(); - } - - /** - * Tests the situation where some container moves time out because they - * take longer than "move.timeout". 
- */ - @Test - public void checkIterationResultTimeout() - throws NodeNotFoundException, IOException, - IllegalContainerBalancerStateException, - InvalidContainerBalancerConfigurationException, - TimeoutException { - - CompletableFuture completedFuture = - CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED); - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(completedFuture) - .thenAnswer(invocation -> genCompletableFuture(2000)); - - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - balancerConfiguration.setMoveTimeout(Duration.ofMillis(500)); - rmConf.setEnableLegacy(true); - startBalancer(balancerConfiguration); - - /* - According to the setup and configurations, this iteration's result should - be ITERATION_COMPLETED. - */ - assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - assertEquals(1, containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration()); - assertThat(containerBalancerTask.getMetrics() - .getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(1); - stopBalancer(); - - /* - Test the same but use MoveManager instead of LegacyReplicationManager. - The first move being 10ms falls within the timeout duration of 500ms. It - should be successful. The rest should fail. - */ - rmConf.setEnableLegacy(false); - when(moveManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(completedFuture) - .thenAnswer(invocation -> genCompletableFuture(2000)); - - startBalancer(balancerConfiguration); - assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - assertEquals(1, containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration()); - assertThat(containerBalancerTask.getMetrics() - .getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(1); - stopBalancer(); - } - - @Test - public void checkIterationResultTimeoutFromReplicationManager() - throws NodeNotFoundException, IOException, - IllegalContainerBalancerStateException, - InvalidContainerBalancerConfigurationException, TimeoutException { - CompletableFuture future - = CompletableFuture.supplyAsync(() -> - MoveManager.MoveResult.REPLICATION_FAIL_TIME_OUT); - CompletableFuture future2 - = CompletableFuture.supplyAsync(() -> - MoveManager.MoveResult.DELETION_FAIL_TIME_OUT); - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(future, future2); - - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - balancerConfiguration.setMoveTimeout(Duration.ofMillis(500)); - rmConf.setEnableLegacy(true); - startBalancer(balancerConfiguration); - - assertThat(containerBalancerTask.getMetrics() - .getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(0); - assertEquals(0, 
containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration()); - stopBalancer(); - - /* - Try the same test with MoveManager instead of LegacyReplicationManager. - */ - when(moveManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(future).thenAnswer(invocation -> future2); - - rmConf.setEnableLegacy(false); - startBalancer(balancerConfiguration); - assertThat(containerBalancerTask.getMetrics() - .getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(0); - assertEquals(0, containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration()); - stopBalancer(); - } - - @Test - public void checkIterationResultException() - throws NodeNotFoundException, IOException, - IllegalContainerBalancerStateException, - InvalidContainerBalancerConfigurationException, - TimeoutException { - - CompletableFuture future = - new CompletableFuture<>(); - future.completeExceptionally(new RuntimeException("Runtime Exception")); - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(CompletableFuture.supplyAsync(() -> { - try { - Thread.sleep(1); - } catch (Exception ignored) { - } - throw new RuntimeException("Runtime Exception after doing work"); - })) - .thenThrow(new ContainerNotFoundException("Test Container not found")) - .thenReturn(future); - - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - balancerConfiguration.setMoveTimeout(Duration.ofMillis(500)); - rmConf.setEnableLegacy(true); - - startBalancer(balancerConfiguration); - - assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - assertThat(containerBalancerTask.getMetrics().getNumContainerMovesFailed()) - .isGreaterThanOrEqualTo(3); - stopBalancer(); - - /* - Try the same test but with MoveManager instead of ReplicationManager. - */ - when(moveManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(CompletableFuture.supplyAsync(() -> { - try { - Thread.sleep(1); - } catch (Exception ignored) { - } - throw new RuntimeException("Runtime Exception after doing work"); - })) - .thenThrow(new ContainerNotFoundException("Test Container not found")) - .thenReturn(future); - - rmConf.setEnableLegacy(false); - startBalancer(balancerConfiguration); - assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - assertThat(containerBalancerTask.getMetrics().getNumContainerMovesFailed()) - .isGreaterThanOrEqualTo(3); - stopBalancer(); - } - @Test public void testDelayedStart() throws InterruptedException, TimeoutException { conf.setTimeDuration("hdds.scm.wait.time.after.safemode.exit", 10, @@ -593,40 +356,6 @@ public void testDelayedStart() throws InterruptedException, TimeoutException { assertFalse(balancingThread.isAlive()); } - /** - * The expectation is that only RATIS containers should be selected for - * balancing when LegacyReplicationManager is enabled. This is because - * LegacyReplicationManager does not support moving EC containers. 
- */ - @Test - public void balancerShouldExcludeECContainersWhenLegacyRmIsEnabled() - throws IllegalContainerBalancerStateException, IOException, - InvalidContainerBalancerConfigurationException, TimeoutException { - // Enable LegacyReplicationManager - rmConf.setEnableLegacy(true); - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - - startBalancer(balancerConfiguration); - - /* - Get all containers that were selected by balancer and assert none of - them is an EC container. - */ - Map containerToSource = - containerBalancerTask.getContainerToSourceMap(); - assertFalse(containerToSource.isEmpty()); - for (Map.Entry entry : - containerToSource.entrySet()) { - ContainerInfo containerInfo = cidToInfoMap.get(entry.getKey()); - assertNotSame(HddsProtos.ReplicationType.EC, - containerInfo.getReplicationType()); - } - } - /** * Tests if balancer is adding the polled source datanode back to potentialSources queue * if a move has failed due to a container related failure, like REPLICATION_FAIL_NOT_EXIST_IN_SOURCE. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java deleted file mode 100644 index 8aac64de702..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java +++ /dev/null @@ -1,3489 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.replication; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.ContainerStateManager; -import org.apache.hadoop.hdds.scm.container.ContainerStateManagerImpl; -import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; -import org.apache.hadoop.hdds.scm.container.SimpleMockNodeManager; -import org.apache.hadoop.hdds.scm.container.balancer.MoveManager; -import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager.LegacyReplicationManagerConfiguration; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; -import org.apache.hadoop.hdds.scm.PlacementPolicy; -import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; -import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.ha.SCMHAManager; -import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; -import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; -import org.apache.hadoop.hdds.scm.metadata.SCMDBTransactionBufferImpl; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.scm.node.NodeStatus; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.hdds.utils.db.LongCodec; -import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import 
org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.TestClock; -import org.apache.ozone.test.tag.Unhealthy; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.EnumSource; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.File; -import java.io.IOException; -import java.time.Clock; -import java.time.Instant; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; -import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.QUASI_CLOSED; -import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.UNHEALTHY; -import static org.apache.hadoop.hdds.scm.HddsTestUtils.CONTAINER_NUM_KEYS_DEFAULT; -import static org.apache.hadoop.hdds.scm.HddsTestUtils.CONTAINER_USED_BYTES_DEFAULT; -import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; -import static org.apache.hadoop.hdds.scm.HddsTestUtils.getReplicaBuilder; -import static org.apache.hadoop.hdds.scm.HddsTestUtils.getReplicas; -import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertSame; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.anyLong; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.argThat; -import 
static org.mockito.Mockito.anyList; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.when; - -/** - * Test cases to verify the functionality of ReplicationManager. - */ -public class TestLegacyReplicationManager { - - private ReplicationManager replicationManager; - private ContainerStateManager containerStateManager; - private PlacementPolicy ratisContainerPlacementPolicy; - private PlacementPolicy ecContainerPlacementPolicy; - private EventQueue eventQueue; - private DatanodeCommandHandler datanodeCommandHandler; - private SimpleMockNodeManager nodeManager; - private ContainerManager containerManager; - private GenericTestUtils.LogCapturer scmLogs; - private SCMServiceManager serviceManager; - private TestClock clock; - private DBStore dbStore; - private ContainerReplicaPendingOps containerReplicaPendingOps; - - @TempDir - private File tempDir; - - int getInflightCount(InflightType type) { - return replicationManager.getLegacyReplicationManager() - .getInflightCount(type); - } - - @BeforeEach - void setup(@TempDir File testDir) throws IOException, InterruptedException, - NodeNotFoundException, InvalidStateTransitionException { - OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - conf.setTimeDuration( - HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, - 0, TimeUnit.SECONDS); - - scmLogs = GenericTestUtils.LogCapturer. - captureLogs(LegacyReplicationManager.LOG); - containerManager = mock(ContainerManager.class); - nodeManager = new SimpleMockNodeManager(); - eventQueue = new EventQueue(); - SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); - PipelineManager pipelineManager = mock(PipelineManager.class); - when(pipelineManager.containsPipeline(any(PipelineID.class))) - .thenReturn(true); - containerStateManager = ContainerStateManagerImpl.newBuilder() - .setConfiguration(conf) - .setPipelineManager(pipelineManager) - .setRatisServer(scmhaManager.getRatisServer()) - .setContainerStore(SCMDBDefinition.CONTAINERS.getTable(dbStore)) - .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer()) - .setContainerReplicaPendingOps(new ContainerReplicaPendingOps( - Clock.system(ZoneId.systemDefault()))) - .build(); - serviceManager = new SCMServiceManager(); - - datanodeCommandHandler = new DatanodeCommandHandler(); - eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, datanodeCommandHandler); - - when(containerManager.getContainers()) - .thenAnswer(invocation -> { - Set ids = containerStateManager.getContainerIDs(); - List containers = new ArrayList<>(); - for (ContainerID id : ids) { - containers.add(containerStateManager.getContainer( - id)); - } - return containers; - }); - - when(containerManager.getContainer(any(ContainerID.class))) - .thenAnswer(invocation -> containerStateManager - .getContainer(((ContainerID)invocation - .getArguments()[0]))); - - when(containerManager.getContainerReplicas( - any(ContainerID.class))) - .thenAnswer(invocation -> containerStateManager - .getContainerReplicas(((ContainerID)invocation - .getArguments()[0]))); - - ratisContainerPlacementPolicy = mock(PlacementPolicy.class); - ecContainerPlacementPolicy = mock(PlacementPolicy.class); - - when(ratisContainerPlacementPolicy.chooseDatanodes( - any(), any(), anyInt(), - anyLong(), anyLong())) - .thenAnswer(invocation -> { - int count = (int) invocation.getArguments()[2]; - return IntStream.range(0, 
count) - .mapToObj(i -> randomDatanodeDetails()) - .collect(Collectors.toList()); - }); - - when(ratisContainerPlacementPolicy.validateContainerPlacement( - any(), - anyInt() - )).thenAnswer(invocation -> - new ContainerPlacementStatusDefault(2, 2, 3)); - clock = new TestClock(Instant.now(), ZoneId.of("UTC")); - containerReplicaPendingOps = new ContainerReplicaPendingOps(clock); - createReplicationManager(newRMConfig()); - } - - void createReplicationManager(int replicationLimit, int deletionLimit) - throws Exception { - replicationManager.stop(); - dbStore.close(); - final LegacyReplicationManagerConfiguration conf - = new LegacyReplicationManagerConfiguration(); - conf.setContainerInflightReplicationLimit(replicationLimit); - conf.setContainerInflightDeletionLimit(deletionLimit); - createReplicationManager(conf); - } - - void createReplicationManager( - LegacyReplicationManagerConfiguration conf) - throws Exception { - createReplicationManager(newRMConfig(), conf); - } - - private void createReplicationManager(ReplicationManagerConfiguration rmConf) - throws InterruptedException, IOException { - createReplicationManager(rmConf, null); - } - - private void createReplicationManager(ReplicationManagerConfiguration rmConf, - LegacyReplicationManagerConfiguration lrmConf) - throws InterruptedException, IOException { - OzoneConfiguration config = SCMTestUtils.getConf(tempDir); - config.setTimeDuration( - HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, - 0, TimeUnit.SECONDS); - Optional.ofNullable(rmConf).ifPresent(config::setFromObject); - Optional.ofNullable(lrmConf).ifPresent(config::setFromObject); - - SCMHAManager scmHAManager = SCMHAManagerStub - .getInstance(true, new SCMDBTransactionBufferImpl()); - dbStore = DBStoreBuilder.createDBStore( - config, new SCMDBDefinition()); - - LegacyReplicationManager legacyRM = new LegacyReplicationManager( - config, containerManager, ratisContainerPlacementPolicy, eventQueue, - SCMContext.emptyContext(), nodeManager, scmHAManager, clock, - SCMDBDefinition.MOVE.getTable(dbStore)); - - replicationManager = new ReplicationManager( - config, - containerManager, - ratisContainerPlacementPolicy, - ecContainerPlacementPolicy, - eventQueue, - SCMContext.emptyContext(), - nodeManager, - clock, - legacyRM, - containerReplicaPendingOps); - - serviceManager.register(replicationManager); - serviceManager.notifyStatusChanged(); - scmLogs.clearOutput(); - Thread.sleep(100L); - } - - @AfterEach - public void teardown() throws Exception { - containerStateManager.close(); - replicationManager.stop(); - if (dbStore != null) { - dbStore.close(); - } - } - - @Nested - class Misc { - /** - * Checks if restarting of replication manager works. 
- */ - @Test - public void testReplicationManagerRestart() throws InterruptedException { - assertTrue(replicationManager.isRunning()); - replicationManager.stop(); - // Stop is a non-blocking call, it might take sometime for the - // ReplicationManager to shutdown - Thread.sleep(500); - assertFalse(replicationManager.isRunning()); - replicationManager.start(); - assertTrue(replicationManager.isRunning()); - } - - @Test - public void testGetContainerReplicaCount() - throws IOException, TimeoutException { - ContainerInfo container = createContainer(LifeCycleState.QUASI_CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), UNHEALTHY); - addReplica(container, NodeStatus.inServiceHealthy(), UNHEALTHY); - ContainerReplica decommissioningReplica = - addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), - UNHEALTHY); - - ContainerReplicaCount replicaCount = - replicationManager.getLegacyReplicationManager() - .getContainerReplicaCount(container); - - assertInstanceOf(LegacyRatisContainerReplicaCount.class, replicaCount); - assertFalse(replicaCount.isSufficientlyReplicated()); - assertFalse(replicaCount.isSufficientlyReplicatedForOffline( - decommissioningReplica.getDatanodeDetails(), nodeManager)); - - addReplica(container, NodeStatus.inServiceHealthy(), UNHEALTHY); - replicaCount = replicationManager.getLegacyReplicationManager() - .getContainerReplicaCount(container); - assertTrue(replicaCount.isSufficientlyReplicated()); - assertTrue(replicaCount.isSufficientlyReplicatedForOffline( - decommissioningReplica.getDatanodeDetails(), nodeManager)); - assertTrue(replicaCount.isHealthyEnoughForOffline()); - } - } - - /** - * Tests replication manager with healthy open and closed containers. No - * quasi closed or unhealthy containers are involved. - */ - @Nested - class StableReplicas { - /** - * Open containers are not handled by ReplicationManager. - * This test-case makes sure that ReplicationManages doesn't take - * any action on OPEN containers. - */ - @Test - public void testOpenContainer() throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.OPEN); - containerStateManager.addContainer(container.getProtobuf()); - replicationManager.processAll(); - eventQueue.processAll(1000); - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.OPEN)); - assertEquals(0, datanodeCommandHandler.getInvocation()); - } - - /** - * 1 open replica - * 2 closing replicas - * Expectation: Close command is sent to the open replicas. 
- */ - @Test - public void testClosingContainer() throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSING); - final ContainerID id = container.containerID(); - - containerStateManager.addContainer(container.getProtobuf()); - - // Two replicas in CLOSING state - final Set replicas = getReplicas(id, State.CLOSING, - randomDatanodeDetails(), - randomDatanodeDetails()); - - // One replica in OPEN state - final DatanodeDetails datanode = randomDatanodeDetails(); - replicas.addAll(getReplicas(id, State.OPEN, datanode)); - - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentCloseCommandCount + 3, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - - // Update the OPEN to CLOSING - for (ContainerReplica replica: getReplicas(id, State.CLOSING, datanode)) { - containerStateManager.updateContainerReplica(id, replica); - } - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentCloseCommandCount + 6, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.CLOSING)); - } - - /** - * Create closing container with 1 replica. - * Expectation: Missing containers 0. - * Remove the only replica. - * Expectation: Missing containers 1. - */ - @Test - public void testClosingMissingContainer() - throws IOException, InvalidStateTransitionException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSING); - final ContainerID id = container.containerID(); - - containerStateManager.addContainer(container.getProtobuf()); - - // One replica in OPEN state - final Set replicas = getReplicas(id, State.OPEN, - randomDatanodeDetails()); - - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentCloseCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.CLOSING)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.MISSING)); - - for (ContainerReplica replica : replicas) { - containerStateManager.removeContainerReplica(id, replica); - } - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentCloseCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - - report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.CLOSING)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.MISSING)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.MIS_REPLICATED)); - - verify(containerManager, 
times(0)).updateContainerState(container.containerID(), - LifeCycleEvent.QUASI_CLOSE); - } - - @Test - public void testReplicateCommandTimeout() - throws IOException, TimeoutException { - long timeout = new ReplicationManagerConfiguration().getEventTimeout(); - - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - assertReplicaScheduled(1); - - // Already a pending replica, so nothing scheduled - assertReplicaScheduled(0); - - // Advance the clock past the timeout, and there should be a replica - // scheduled - clock.fastForward(timeout + 1000); - assertReplicaScheduled(1); - assertEquals(1, replicationManager.getMetrics() - .getReplicaCreateTimeoutTotal()); - } - - @Test - public void testDeleteCommandTimeout() - throws IOException, TimeoutException { - long timeout = new ReplicationManagerConfiguration().getEventTimeout(); - - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - assertDeleteScheduled(1); - - // Already a pending replica, so nothing scheduled - assertReplicaScheduled(0); - - // Advance the clock past the timeout, and there should be a replica - // scheduled - clock.fastForward(timeout + 1000); - assertDeleteScheduled(1); - assertEquals(1, replicationManager.getMetrics() - .getReplicaDeleteTimeoutTotal()); - } - - /** - * A closed empty container with all the replicas also closed and empty - * should be deleted. - * A container/ replica should be deemed empty when it has 0 keyCount even - * if the usedBytes is not 0 (usedBytes should not be used to determine if - * the container or replica is empty). - */ - @Test - public void testDeleteEmptyContainer() throws Exception { - runTestDeleteEmptyContainer(3); - } - - Void runTestDeleteEmptyContainer(int expectedDelete) throws Exception { - // Create container with usedBytes = 1000 and keyCount = 0 - final ContainerInfo container = createContainer( - LifeCycleState.CLOSED, 1000, 0); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - // Create a replica with usedBytes != 0 and keyCount = 0 - addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), CLOSED, 100, 0); - - assertDeleteScheduled(expectedDelete); - return null; - } - - @Test - public void testEmptyContainerWithNoReplicas() throws Exception { - final ContainerInfo container = createContainer( - LifeCycleState.CLOSED, 0, 0); - // No replicas - replicationManager.processAll(); - eventQueue.processAll(1000); - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, - report.getStat(ReplicationManagerReport.HealthState.EMPTY)); - assertEquals(LifeCycleState.CLOSED, container.getState()); - } - - @Test - public void testDeletionLimit() throws Exception { - runTestLimit(0, 2, 0, 1, - () -> runTestDeleteEmptyContainer(2)); - } - - /** - * A closed empty container with a non-empty replica should not be deleted. 
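The emptiness rule spelled out in the comment above comes down to key count alone; a non-zero usedBytes must not keep an empty container alive. A tiny sketch of that predicate, using a hypothetical ReplicaInfo holder rather than Ozone's ContainerInfo or ContainerReplica types, follows.

public class EmptyContainerSketch {

  // Minimal stand-in for the two fields the emptiness check cares about.
  static final class ReplicaInfo {
    final long usedBytes;
    final long keyCount;

    ReplicaInfo(long usedBytes, long keyCount) {
      this.usedBytes = usedBytes;
      this.keyCount = keyCount;
    }
  }

  // Empty means zero keys; stale non-zero usedBytes alone must not block deletion.
  static boolean isEmpty(ReplicaInfo replica) {
    return replica.keyCount == 0;
  }

  public static void main(String[] args) {
    System.out.println(isEmpty(new ReplicaInfo(1000, 0))); // true: no keys despite usedBytes = 1000
    System.out.println(isEmpty(new ReplicaInfo(100, 1)));  // false: still holds a key
  }
}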
- */ - @Test - public void testDeleteEmptyContainerNonEmptyReplica() throws Exception { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED, 0, - 0); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - // Create the 3rd replica with non-zero key count and used bytes - addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), CLOSED, 100, 1); - assertDeleteScheduled(0); - } - - /** - * ReplicationManager should replicate zero replica when all copies - * are missing. - */ - @Test - public void testContainerWithMissingReplicas() - throws IOException, TimeoutException { - createContainer(LifeCycleState.CLOSED); - assertReplicaScheduled(0); - assertUnderReplicatedCount(1); - assertMissingCount(1); - } - - /** - * 3 healthy closed replicas. - * Expectation: No action. - * - * ReplicationManager should not take any action if the container is - * CLOSED and healthy. - */ - @Test - public void testHealthyClosedContainer() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSED); - final ContainerID id = container.containerID(); - final Set replicas = getReplicas(id, State.CLOSED, - randomDatanodeDetails(), - randomDatanodeDetails(), - randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(0, datanodeCommandHandler.getInvocation()); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.CLOSED)); - for (ReplicationManagerReport.HealthState s : - ReplicationManagerReport.HealthState.values()) { - assertEquals(0, report.getStat(s)); - } - } - } - - /** - * Tests replication manager with unhealthy and quasi-closed container - * replicas. - */ - @Nested - class UnstableReplicas { - - /** - * A CLOSING container which has only UNHEALTHY replicas should be moved - * to QUASI_CLOSED state so that RM can then maintain replication factor - * number of replicas. - */ - @Test - public void testClosingContainerWithOnlyUnhealthyReplicas() - throws IOException, InvalidStateTransitionException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSING); - final ContainerID id = container.containerID(); - containerStateManager.addContainer(container.getProtobuf()); - - // all replicas are UNHEALTHY - final Set replicas = getReplicas(id, UNHEALTHY, - randomDatanodeDetails(), randomDatanodeDetails(), - randomDatanodeDetails()); - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - replicationManager.processAll(); - verify(containerManager, times(1)) - .updateContainerState(container.containerID(), - LifeCycleEvent.QUASI_CLOSE); - - containerStateManager.updateContainerState( - container.containerID().getProtobuf(), LifeCycleEvent.QUASI_CLOSE); - - replicationManager.processAll(); - assertEquals(1, - replicationManager.getContainerReport().getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - } - - /** - * Close command should be sent to the healthy replicas. The container - * should not be moved to quasi-closed immediately. 
- */ - @Test - public void testClosingContainerWithSomeUnhealthyReplicas() - throws IOException, InvalidStateTransitionException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSING); - final ContainerID id = container.containerID(); - containerStateManager.addContainer(container.getProtobuf()); - - // 2 UNHEALTHY, 1 OPEN - final Set replicas = getReplicas(id, UNHEALTHY, - randomDatanodeDetails(), randomDatanodeDetails()); - final DatanodeDetails datanode = randomDatanodeDetails(); - replicas.addAll(getReplicas(id, State.OPEN, datanode)); - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - replicationManager.processAll(); - eventQueue.processAll(1000); - - verify(containerManager, times(0)) - .updateContainerState(container.containerID(), - LifeCycleEvent.QUASI_CLOSE); - assertEquals(currentCloseCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - assertEquals(1, datanodeCommandHandler.getReceivedCommands().size()); - SCMCommand command = - datanodeCommandHandler.getReceivedCommands().iterator().next() - .getCommand(); - assertSame(SCMCommandProto.Type.closeContainerCommand, - command.getType()); - CloseContainerCommand closeCommand = (CloseContainerCommand) command; - assertFalse(closeCommand.isForce()); - } - - /** - * 2 open replicas - * 1 quasi-closed replica - * Expectation: close command is sent to the open replicas. - */ - @Test - public void testQuasiClosedContainerWithTwoOpenReplica() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.OPEN, 1000L, originNodeId, randomDatanodeDetails()); - final DatanodeDetails datanodeDetails = randomDatanodeDetails(); - final ContainerReplica replicaThree = getReplicas( - id, State.OPEN, 1000L, datanodeDetails.getUuid(), datanodeDetails); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica( - id, replicaThree); - - // First iteration - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - // Two of the replicas are in OPEN state - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentCloseCommandCount + 2, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.closeContainerCommand, - replicaTwo.getDatanodeDetails())); - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.closeContainerCommand, - replicaThree.getDatanodeDetails())); - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - } - - /** - * 3 quasi closed replicas with the same origin node ID. - * Expectation: No action taken. 
- * - * When the container is in QUASI_CLOSED state and all the replicas are - * also in QUASI_CLOSED state and doesn't have a quorum to force close - * the container, ReplicationManager will not do anything. - */ - @Test - public void testHealthyQuasiClosedContainer() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica( - id, replicaThree); - - // All the QUASI_CLOSED replicas have same originNodeId, so the - // container will not be closed. ReplicationManager should take no action. - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(0, datanodeCommandHandler.getInvocation()); - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - } - - /** - * 2 quasi-closed replicas. - * 1 unhealthy replica. - * All replicas have same origin node ID. - * Expectation: - * Round 1: Quasi closed replica is replicated. - * Round 2: Unhealthy replica is deleted. - * - * When a container is QUASI_CLOSED and we don't have quorum to force close - * the container, the container should have all the replicas in QUASI_CLOSED - * state, else ReplicationManager will take action. - * - * In this test case we make one of the replica unhealthy, replication - * manager will send delete container command to the datanode which has the - * unhealthy replica. - */ - @Test - public void testQuasiClosedContainerWithUnhealthyReplica() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - container.setUsedBytes(100); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica(id, replicaThree); - - int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - - // All the QUASI_CLOSED replicas have same originNodeId, so the - // container will not be closed. ReplicationManager should take no action. 
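The "stuck" condition asserted above can be pictured with a minimal quorum check. This is a simplified stand-in, assuming (as the force-close description later in this class states) that more than half of the replication factor must be covered by QUASI_CLOSED replicas with unique origin node ids.

import java.util.Arrays;
import java.util.List;
import java.util.UUID;

class QuasiClosedQuorumSketch {
  // A QUASI_CLOSED container can only be force closed when its QUASI_CLOSED
  // replicas cover more than half of the replication factor with distinct
  // origin datanode ids; three replicas sharing one origin id therefore
  // leave the container QUASI_CLOSED_STUCK.
  static boolean hasForceCloseQuorum(List<UUID> quasiClosedOrigins,
      int replicationFactor) {
    long uniqueOrigins = quasiClosedOrigins.stream().distinct().count();
    return uniqueOrigins > replicationFactor / 2;
  }

  public static void main(String[] args) {
    UUID origin = UUID.randomUUID();
    // Same origin on all three replicas: no quorum, container stays stuck.
    System.out.println(hasForceCloseQuorum(
        Arrays.asList(origin, origin, origin), 3));
    // Two distinct origins out of a factor of three: quorum reached.
    System.out.println(hasForceCloseQuorum(
        Arrays.asList(UUID.randomUUID(), UUID.randomUUID()), 3));
  }
}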
- replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(0, datanodeCommandHandler.getInvocation()); - - // Make the first replica unhealthy - final ContainerReplica unhealthyReplica = getReplicas( - id, UNHEALTHY, 1000L, originNodeId, - replicaOne.getDatanodeDetails()); - containerStateManager.updateContainerReplica( - id, unhealthyReplica); - - long currentBytesToReplicate = replicationManager.getMetrics() - .getReplicationBytesTotal(); - replicationManager.processAll(); - eventQueue.processAll(1000); - // Under replication handler should first re-replicate one of the quasi - // closed containers. - // The unhealthy container should not have been deleted in the first pass. - assertDeleteScheduled(0); - currentReplicateCommandCount += 1; - currentBytesToReplicate += 100L; - assertEquals(currentReplicateCommandCount, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(currentReplicateCommandCount, - replicationManager.getMetrics().getReplicationCmdsSentTotal()); - assertEquals(currentBytesToReplicate, - replicationManager.getMetrics().getReplicationBytesTotal()); - assertEquals(1, getInflightCount(InflightType.REPLICATION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightReplication()); - - // The quasi closed container cannot be closed, but it should have been - // restored to full replication on the previous run. - // The unhealthy replica should remain until the next iteration. - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.UNHEALTHY)); - - // Create the replica so replication manager sees it on the next run. - List replicateCommands = datanodeCommandHandler - .getReceivedCommands().stream() - .filter(c -> c.getCommand().getType() - .equals(SCMCommandProto.Type.replicateContainerCommand)) - .collect(Collectors.toList()); - for (CommandForDatanode replicateCommand: replicateCommands) { - DatanodeDetails newNode = createDatanodeDetails( - replicateCommand.getDatanodeId()); - ContainerReplica newReplica = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, newNode); - containerStateManager.updateContainerReplica(id, newReplica); - } - - // On the next run, the unhealthy container should be scheduled for - // deletion, since the quasi closed container is now sufficiently - // replicated. - // This method runs an iteration of replication manager. - assertDeleteScheduled(1); - assertExactDeleteTargets(unhealthyReplica.getDatanodeDetails()); - // Replication should have finished on the previous iteration, leaving - // these numbers unchanged. - assertEquals(currentReplicateCommandCount, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(currentReplicateCommandCount, - replicationManager.getMetrics().getReplicationCmdsSentTotal()); - assertEquals(currentBytesToReplicate, - replicationManager.getMetrics().getReplicationBytesTotal()); - assertEquals(0, getInflightCount(InflightType.REPLICATION)); - assertEquals(0, replicationManager.getMetrics().getInflightReplication()); - - // Now we will delete the unhealthy replica. 
- containerStateManager.removeContainerReplica(id, unhealthyReplica); - - // There should be no work left on the following runs. - replicationManager.processAll(); - eventQueue.processAll(1000); - // The two commands shown are the previous delete and replicate commands. - assertEquals(2, datanodeCommandHandler.getInvocation()); - } - - - /** - * Container is quasi closed. - * 3 quasi-closed replicas with the same origin node ID. - * 1 unhealthy replica with unique origin node ID. - * - * Expectation: - * No action taken. 3 healthy replicas are present. The unhealthy replica - * should not be deleted since it has a unique origin node ID. The - * container cannot be closed because there are not enough healthy unique - * origin node IDs. - */ - @Test - public void testQuasiClosedContainerWithUniqueUnhealthyReplica() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - container.setUsedBytes(100); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replica4 = getReplicas( - id, UNHEALTHY, 1000L, randomDatanodeDetails().getUuid(), - randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica(id, replicaThree); - containerStateManager.updateContainerReplica(id, replica4); - - replicationManager.processAll(); - eventQueue.processAll(1000); - - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - - // now, add a copy of the UNHEALTHY replica on a decommissioning node, - // and another on a dead node. The expectation is still that no replica - // should be deleted as both these nodes are likely going away soon. 
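A rough sketch of the retention rule the two tests above depend on (a hypothetical, simplified signature; not the real delete-handling code): an UNHEALTHY replica is only considered for deletion when its origin id is still covered by another replica and its datanode is not already on its way out.

import java.util.List;
import java.util.UUID;

class UnhealthyReplicaRetentionSketch {
  // An UNHEALTHY replica of a QUASI_CLOSED container is kept if it carries a
  // unique origin datanode id, because that origin may later be needed to
  // reach the force-close quorum.  Replicas on decommissioning or dead nodes
  // are also left alone, since those nodes are expected to go away on their
  // own.
  static boolean isDeletionCandidate(UUID unhealthyReplicaOrigin,
      List<UUID> otherReplicaOrigins,
      boolean hostDecommissioningOrDead) {
    return !hostDecommissioningOrDead
        && otherReplicaOrigins.contains(unhealthyReplicaOrigin);
  }
}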
- final ContainerReplica replica5 = getReplicas( - id, UNHEALTHY, 1000L, replica4.getOriginDatanodeId(), - randomDatanodeDetails()); - nodeManager.register(replica5.getDatanodeDetails(), - new NodeStatus(DECOMMISSIONING, HEALTHY)); - DatanodeDetails deadNode = randomDatanodeDetails(); - nodeManager.register(deadNode, NodeStatus.inServiceDead()); - final ContainerReplica replica6 = getReplicas( - id, UNHEALTHY, 1000L, replica4.getOriginDatanodeId(), - deadNode); - containerStateManager.updateContainerReplica(container.containerID(), - replica5); - containerStateManager.updateContainerReplica(container.containerID(), - replica6); - - replicationManager.processAll(); - eventQueue.processAll(1000); - - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - // Even though we have extra replicas, we are deliberately keeping them - // since they are unique. This does not count as over-replication. - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.OVER_REPLICATED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.UNHEALTHY)); - } - - /** - * Situation: QUASI_CLOSED container with 3 QUASI_CLOSED replicas - * whose Sequence ID is smaller than the container's. There are 2 - * UNHEALTHY replicas with Sequence ID same as the container's. One of - * them is on a decommissioning node. - * - * Expectation: Replication command should be sent for the UNHEALTHY - * replica on the decommissioning node. 
- */ - @Test - public void testQuasiClosedHavingUnhealthyReplicaWithGreatestBCSID() - throws IOException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - container.setUsedBytes(100); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - long sequenceID = container.getSequenceId(); - final ContainerReplica replica1 = - getReplicas(id, QUASI_CLOSED, sequenceID - 1, originNodeId, - randomDatanodeDetails()); - final ContainerReplica replica2 = getReplicas( - id, QUASI_CLOSED, sequenceID - 1, originNodeId, - randomDatanodeDetails()); - final ContainerReplica replica3 = getReplicas( - id, QUASI_CLOSED, sequenceID - 1, originNodeId, - randomDatanodeDetails()); - DatanodeDetails decommissioning = - MockDatanodeDetails.randomDatanodeDetails(); - decommissioning.setPersistedOpState(DECOMMISSIONING); - nodeManager.register(decommissioning, - new NodeStatus(DECOMMISSIONING, HEALTHY)); - final ContainerReplica replica4 = getReplicas( - id, UNHEALTHY, sequenceID, decommissioning.getUuid(), - decommissioning); - final ContainerReplica replica5 = getReplicas( - id, UNHEALTHY, sequenceID, randomDatanodeDetails().getUuid(), - randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replica1); - containerStateManager.updateContainerReplica(id, replica2); - containerStateManager.updateContainerReplica(id, replica3); - containerStateManager.updateContainerReplica(id, replica4); - containerStateManager.updateContainerReplica(id, replica5); - - replicationManager.processAll(); - eventQueue.processAll(1000); - - // 1 replicate command should have been sent - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(1, - replicationManager.getContainerReport().getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - - // the following code asserts that replicate was sent for the UNHEALTHY - // replica on the decommissioning node - CommandForDatanode command = - datanodeCommandHandler.getReceivedCommands().iterator().next(); - assertEquals(SCMCommandProto.Type.replicateContainerCommand, - command.getCommand().getType()); - ReplicateContainerCommand replicateCommand = - (ReplicateContainerCommand) command.getCommand(); - assertEquals(1, replicateCommand.getSourceDatanodes().size()); - assertEquals(replica4.getDatanodeDetails(), - replicateCommand.getSourceDatanodes().iterator().next()); - - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - - // If we don't complete the pending add by the next iteration, it's - // expected that another replicate command is not sent. - replicationManager.processAll(); - eventQueue.processAll(100); - - // that 1 command is the one RM sent in the last iteration - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - - // Now, we will complete the add. Expectation is that no new commands - // should be sent in the next iteration. 
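The "no new command while an add is pending" behaviour checked above can be pictured with a minimal in-flight tracker; the names here are made up for illustration and do not correspond to the real inflight bookkeeping.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class InflightAddTrackerSketch {
  // containerId -> datanodes with an outstanding replicate command.
  private final Map<Long, Set<String>> inflightAdds = new HashMap<>();

  // Returns true if a replicate command may be sent now; while an earlier
  // add is still pending, later iterations schedule nothing new.
  boolean trySchedule(long containerId, String targetDatanode) {
    Set<String> pending =
        inflightAdds.computeIfAbsent(containerId, k -> new HashSet<>());
    if (!pending.isEmpty()) {
      return false;
    }
    pending.add(targetDatanode);
    return true;
  }

  // Called once the target datanode reports the new replica.
  void completeAdd(long containerId, String targetDatanode) {
    Set<String> pending = inflightAdds.get(containerId);
    if (pending != null && pending.remove(targetDatanode) && pending.isEmpty()) {
      inflightAdds.remove(containerId);
    }
  }
}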
- ContainerReplica newReplica = - getReplicas(container.containerID(), UNHEALTHY, - container.getSequenceId(), replica4.getOriginDatanodeId(), - MockDatanodeDetails.createDatanodeDetails( - command.getDatanodeId())); - containerStateManager.updateContainerReplica(container.containerID(), - newReplica); - - replicationManager.processAll(); - eventQueue.processAll(100); - - // that 1 command is the one RM sent in the last iteration - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - } - - /** - * Situation: QUASI_CLOSED container with 2 QUASI_CLOSED replicas - * whose Sequence ID is smaller than the container's. There's 1 - * UNHEALTHY replica with Sequence ID same as the container's and on a - * decommissioning node. - *

    - * Expectation: First, one of the QUASI_CLOSED should be replicated to - * get 3 of them. In the next iteration, the UNHEALTHY replica should be - * replicated. This also verifies that HDDS-9321 does not introduce a - * regression in the under replication flow. - */ - @Test - public void testUnderRepQuasiClosedHavingUnhealthyWithGreatestBCSID() - throws IOException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - container.setUsedBytes(100); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - long sequenceID = container.getSequenceId(); - final ContainerReplica replica1 = - getReplicas(id, QUASI_CLOSED, sequenceID - 1, originNodeId, - randomDatanodeDetails()); - final ContainerReplica replica2 = getReplicas( - id, QUASI_CLOSED, sequenceID - 1, originNodeId, - randomDatanodeDetails()); - DatanodeDetails decommissioning = - MockDatanodeDetails.randomDatanodeDetails(); - decommissioning.setPersistedOpState(DECOMMISSIONING); - final ContainerReplica unhealthyReplica = getReplicas( - id, UNHEALTHY, sequenceID, decommissioning.getUuid(), - decommissioning); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replica1); - containerStateManager.updateContainerReplica(id, replica2); - containerStateManager.updateContainerReplica(id, unhealthyReplica); - - replicationManager.processAll(); - eventQueue.processAll(1000); - - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(1, - replicationManager.getContainerReport().getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - - // the following code asserts that the replicate command was sent for - // either of the QUASI_CLOSED replicas - CommandForDatanode command = - datanodeCommandHandler.getReceivedCommands().iterator().next(); - assertEquals(SCMCommandProto.Type.replicateContainerCommand, - command.getCommand().getType()); - ReplicateContainerCommand replicateCommand = - (ReplicateContainerCommand) command.getCommand(); - List sourceDatanodes = - replicateCommand.getSourceDatanodes(); - assertEquals(2, sourceDatanodes.size()); - assertThat(sourceDatanodes).doesNotContain(unhealthyReplica.getDatanodeDetails()); - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - - // now, add a QUASI_CLOSED replica, which is a copy of replica1 - ContainerReplica newReplica = - getReplicas(container.containerID(), QUASI_CLOSED, - container.getSequenceId() - 1, replica1.getOriginDatanodeId(), - MockDatanodeDetails.createDatanodeDetails( - command.getDatanodeId())); - containerStateManager.updateContainerReplica(container.containerID(), - newReplica); - datanodeCommandHandler.clearState(); - replicationManager.processAll(); - eventQueue.processAll(1000); - - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(1, - replicationManager.getContainerReport().getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - assertEquals(1, - datanodeCommandHandler.getReceivedCommands().size()); - command = datanodeCommandHandler.getReceivedCommands().iterator().next(); - assertEquals(SCMCommandProto.Type.replicateContainerCommand, - command.getCommand().getType()); - replicateCommand = (ReplicateContainerCommand) command.getCommand(); - assertEquals(1, replicateCommand.getSourceDatanodes().size()); - 
assertEquals(unhealthyReplica.getDatanodeDetails(), - replicateCommand.getSourceDatanodes().iterator().next()); - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - } - - /** - * 2 closed replicas - * 1 quasi-closed replica - * SCM state is closed. - * Expectation: The replicate container command should only contain the - * closed replicas as sources. - */ - @Test - public void testOnlyMatchingClosedReplicasReplicated() - throws IOException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica quasiReplica = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica closedReplica1 = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final DatanodeDetails datanodeDetails = randomDatanodeDetails(); - final ContainerReplica closedReplica2 = getReplicas( - id, State.CLOSED, 1000L, datanodeDetails.getUuid(), datanodeDetails); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, quasiReplica); - containerStateManager.updateContainerReplica(id, closedReplica1); - containerStateManager.updateContainerReplica(id, closedReplica2); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - Optional cmdOptional = - datanodeCommandHandler.getReceivedCommands().stream().findFirst(); - assertTrue(cmdOptional.isPresent()); - SCMCommand scmCmd = cmdOptional.get().getCommand(); - assertInstanceOf(ReplicateContainerCommand.class, scmCmd); - ReplicateContainerCommand repCmd = (ReplicateContainerCommand) scmCmd; - - // Only the closed replicas should have been used as sources. - List repSources = repCmd.getSourceDatanodes(); - assertEquals(2, repSources.size()); - assertThat(repSources).containsAll( - Arrays.asList(closedReplica1.getDatanodeDetails(), - closedReplica2.getDatanodeDetails())); - assertThat(repSources).doesNotContain(quasiReplica.getDatanodeDetails()); - } - - /** - * 2 quasi-closed replicas - * 1 unhealthy replica - * SCM state is quasi-closed. - * Expectation: The replicate container command should only contain the - * quasi-closed replicas as sources.
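The source filter that the test above and the quasi-closed variant that follows assert can be sketched as below (stand-in types, not the actual source-selection code): when replicas matching the container's state are available, mismatching or UNHEALTHY replicas are not offered as sources for the replicate command.

import java.util.List;
import java.util.stream.Collectors;

class ReplicationSourceSketch {
  enum State { OPEN, QUASI_CLOSED, CLOSED, UNHEALTHY }

  static final class Replica {
    final String datanode;
    final State state;
    Replica(String datanode, State state) {
      this.datanode = datanode;
      this.state = state;
    }
  }

  // For a CLOSED container only CLOSED replicas are usable sources, for a
  // QUASI_CLOSED container only QUASI_CLOSED ones; everything else is
  // filtered out before the replicate command is built.
  static List<String> chooseSources(State containerState,
      List<Replica> replicas) {
    return replicas.stream()
        .filter(r -> r.state == containerState)
        .map(r -> r.datanode)
        .collect(Collectors.toList());
  }
}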
- */ - @Test - public void testOnlyMatchingQuasiClosedReplicasReplicated() - throws IOException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica quasiReplica1 = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica quasiReplica2 = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final DatanodeDetails datanodeDetails = randomDatanodeDetails(); - final ContainerReplica unhealthyReplica = getReplicas( - id, UNHEALTHY, 1000L, datanodeDetails.getUuid(), datanodeDetails); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, quasiReplica1); - containerStateManager.updateContainerReplica(id, quasiReplica2); - containerStateManager.updateContainerReplica(id, unhealthyReplica); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - Optional cmdOptional = - datanodeCommandHandler.getReceivedCommands().stream().findFirst(); - assertTrue(cmdOptional.isPresent()); - SCMCommand scmCmd = cmdOptional.get().getCommand(); - assertInstanceOf(ReplicateContainerCommand.class, scmCmd); - ReplicateContainerCommand repCmd = (ReplicateContainerCommand) scmCmd; - - // Only the quasi closed replicas should have been used as sources. - List repSources = repCmd.getSourceDatanodes(); - assertEquals(2, repSources.size()); - assertThat(repSources).containsAll( - Arrays.asList(quasiReplica1.getDatanodeDetails(), - quasiReplica2.getDatanodeDetails())); - assertThat(repSources).doesNotContain(unhealthyReplica.getDatanodeDetails()); - } - - /** - * Container is closed. - * 2 quasi-closed replicas. - * 1 unhealthy replica. - * All replicas have unique origin node IDs. - * Quasi closed replicas BCS IDs match closed container's BCS ID. - * - * Expectation: - * Iteration 1: Quasi closed replicas are closed since their BCS IDs - * match the closed container state. - * Iteration 2: The now closed replicas are replicated. - * Iteration 3: The unhealthy replica is deleted. - */ - @Test - public void testCloseableContainerWithUniqueUnhealthyReplica() - throws Exception { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - ContainerReplica quasi1 = addReplicaToDn(container, - randomDatanodeDetails(), QUASI_CLOSED, container.getSequenceId()); - ContainerReplica quasi2 = addReplicaToDn(container, - randomDatanodeDetails(), QUASI_CLOSED, container.getSequenceId()); - ContainerReplica unhealthyReplica = addReplicaToDn(container, - randomDatanodeDetails(), - UNHEALTHY, - 900L); - - // First RM iteration. - // The quasi containers should be closed since their BCSIDs match the - // closed container's state. - assertDeleteScheduled(0); - // All of the replicas are unhealthy, so the container will not be counted - // as under replicated. - assertUnderReplicatedCount(0); - assertEquals(2, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - - // Update RM with the results of the close commands.
- ContainerReplica closedRep1 = getReplicas( - container.containerID(), CLOSED, - container.getSequenceId(), quasi1.getDatanodeDetails()) - .stream().findFirst().get(); - ContainerReplica closedRep2 = getReplicas( - container.containerID(), CLOSED, - container.getSequenceId(), quasi2.getDatanodeDetails()) - .stream().findFirst().get(); - - containerStateManager.updateContainerReplica(container.containerID(), - closedRep1); - containerStateManager.updateContainerReplica(container.containerID(), - closedRep2); - - // Second RM iteration - // Now that we have healthy replicas, they should be replicated. - assertDeleteScheduled(0); - assertUnderReplicatedCount(1); - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - - // Process the replicate command and report the replica back to SCM. - List replicateCommands = datanodeCommandHandler - .getReceivedCommands().stream() - .filter(c -> c.getCommand().getType() - .equals(SCMCommandProto.Type.replicateContainerCommand)) - .collect(Collectors.toList()); - - // Report the new replica to SCM. - for (CommandForDatanode replicateCommand: replicateCommands) { - DatanodeDetails newNode = createDatanodeDetails( - replicateCommand.getDatanodeId()); - ContainerReplica newReplica = getReplicas( - container.containerID(), CLOSED, - container.getSequenceId(), newNode.getUuid(), newNode); - containerStateManager.updateContainerReplica(container.containerID(), - newReplica); - } - - // Third RM iteration - // The unhealthy replica can be deleted since we have 3 healthy copies - // of a closed container. - assertDeleteScheduled(1); - assertUnderReplicatedCount(0); - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - assertExactDeleteTargets(unhealthyReplica.getDatanodeDetails()); - } - - /** - * In small clusters, handling an under replicated container can get - * blocked because DNs are occupied by unhealthy replicas. This - * would make the placement policy throw an exception because it could - * not find any target datanodes for new replicas. - * - * Situation: - * Consider a CLOSED container with replicas 1 CLOSED, 1 QUASI_CLOSED with - * same seq id as the container, and 1 QUASI_CLOSED with smaller seq id. - * Placement policy is mocked to simulate no other target DNs are available. - * - * Expectation: - * 1st iteration: QUASI_CLOSED with same seq id should get closed, and the - * one with smaller seq id should get deleted to free up a DN. - * 2nd iteration: Any CLOSED replica should be replicated. - * 3rd iteration: Container should be OK now. - */ - @Test - public void testUnderReplicationBlockedByUnhealthyReplicas() - throws IOException, TimeoutException { - /* - In the first iteration, throw an SCMException to simulate that placement - policy could not find any targets. In the second iteration, return a list - of required targets. 
- */ - when(ratisContainerPlacementPolicy.chooseDatanodes(any(), any(), anyInt(), anyLong(), anyLong())) - .thenAnswer(invocation -> { - throw new SCMException( - SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); - }) - .thenAnswer(invocation -> { - int nodesRequired = invocation.getArgument(2); - List nodes = new ArrayList<>(nodesRequired); - while (nodesRequired != 0) { - nodes.add(MockDatanodeDetails.randomDatanodeDetails()); - nodesRequired--; - } - return nodes; - }); - - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplicaToDn(container, randomDatanodeDetails(), CLOSED, - container.getSequenceId()); - ContainerReplica quasiToDelete = addReplicaToDn(container, - randomDatanodeDetails(), QUASI_CLOSED, container.getSequenceId() - 1); - ContainerReplica quasi2 = addReplicaToDn(container, - randomDatanodeDetails(), QUASI_CLOSED, container.getSequenceId()); - - // First RM iteration. - // this container is under replicated by 2 replicas. - // quasi2 should be closed since its BCSID matches the container's. - // delete command should be sent for quasiToDelete to unblock under rep - // handling. - assertDeleteScheduled(1); - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.closeContainerCommand, - quasi2.getDatanodeDetails())); - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - quasiToDelete.getDatanodeDetails())); - assertUnderReplicatedCount(1); - - // Update RM with the results of the close and delete commands - ContainerReplica quasiToClosed = getReplicaBuilder( - container.containerID(), CLOSED, quasi2.getBytesUsed(), - quasi2.getKeyCount(), container.getSequenceId(), - quasi2.getOriginDatanodeId(), quasi2.getDatanodeDetails()).build(); - containerStateManager.updateContainerReplica(container.containerID(), - quasiToClosed); - containerStateManager.removeContainerReplica( - container.containerID(), quasiToDelete); - - // Second RM iteration - // Now that we have a free DN, a closed replica should be replicated - assertReplicaScheduled(1); - assertUnderReplicatedCount(1); - - // Process the replicate command and report the replica back to SCM. - List replicateCommands = datanodeCommandHandler - .getReceivedCommands().stream() - .filter(c -> c.getCommand().getType() - .equals(SCMCommandProto.Type.replicateContainerCommand)) - .collect(Collectors.toList()); - assertEquals(1, replicateCommands.size()); - // Report the new replica to SCM. - for (CommandForDatanode replicateCommand: replicateCommands) { - DatanodeDetails newNode = createDatanodeDetails( - replicateCommand.getDatanodeId()); - ContainerReplica newReplica = getReplicas( - container.containerID(), CLOSED, - container.getSequenceId(), newNode.getUuid(), newNode); - containerStateManager.updateContainerReplica(container.containerID(), - newReplica); - } - - // Third RM iteration - assertReplicaScheduled(0); - assertUnderReplicatedCount(0); - assertOverReplicatedCount(0); - } - - /** - * Test for when a quasi_closed container's under replication cannot be - * solved because there are UNHEALTHY replicas occupying datanodes. 
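The chained stubbing used in the test above (a first answer that throws, then an answer that returns targets) is standard Mockito consecutive stubbing; a self-contained illustration with a made-up NodeChooser interface (not the real placement policy API) could look like this.

import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.List;

class ConsecutiveStubbingSketch {
  interface NodeChooser {
    List<String> choose(int nodesRequired) throws Exception;
  }

  static NodeChooser flakyChooser() throws Exception {
    NodeChooser chooser = mock(NodeChooser.class);
    when(chooser.choose(anyInt()))
        // First invocation: simulate "no suitable node found".
        .thenAnswer(invocation -> {
          throw new Exception("FAILED_TO_FIND_SUITABLE_NODE");
        })
        // Subsequent invocations: return the requested number of nodes.
        .thenAnswer(invocation -> {
          int required = invocation.getArgument(0);
          List<String> nodes = new ArrayList<>(required);
          for (int i = 0; i < required; i++) {
            nodes.add("dn-" + i);
          }
          return nodes;
        });
    return chooser;
  }
}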
- */ - @Test - public void testUnderRepQuasiClosedContainerBlockedByUnhealthyReplicas() - throws IOException, TimeoutException { - when(ratisContainerPlacementPolicy.chooseDatanodes(anyList(), any(), anyInt(), anyLong(), anyLong())) - .thenAnswer(invocation -> { - List excluded = invocation.getArgument(0); - if (excluded.size() == 3) { - throw new SCMException( - SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); - } else { - int nodesRequired = invocation.getArgument(2); - List nodes = new ArrayList<>(nodesRequired); - while (nodesRequired != 0) { - DatanodeDetails dn = - MockDatanodeDetails.randomDatanodeDetails(); - nodeManager.register(dn, NodeStatus.inServiceHealthy()); - nodes.add(dn); - nodesRequired--; - } - return nodes; - } - }); - - final ContainerInfo container = - createContainer(LifeCycleState.QUASI_CLOSED); - ContainerReplica quasi1 = addReplicaToDn(container, - randomDatanodeDetails(), QUASI_CLOSED, container.getSequenceId()); - DatanodeDetails nodeForQuasi2 = - MockDatanodeDetails.randomDatanodeDetails(); - nodeManager.register(nodeForQuasi2, NodeStatus.inServiceHealthy()); - ContainerReplica quasi2 = getReplicaBuilder(container.containerID(), - QUASI_CLOSED, container.getUsedBytes(), container.getNumberOfKeys(), - container.getSequenceId(), quasi1.getOriginDatanodeId(), - nodeForQuasi2).build(); - containerStateManager - .updateContainerReplica(container.containerID(), quasi2); - ContainerReplica unhealthy = addReplicaToDn(container, - randomDatanodeDetails(), UNHEALTHY, container.getSequenceId()); - - // First RM iteration. - // this container is under replicated by 1 replica. - // delete command should be sent for unhealthy to unblock under rep - // handling. - assertDeleteScheduled(1); - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - unhealthy.getDatanodeDetails())); - - List commands = datanodeCommandHandler - .getReceivedCommands(); - assertEquals(1, commands.size()); - DeleteContainerCommand deleteContainerCommand = - (DeleteContainerCommand) commands.get(0).getCommand(); - assertTrue(deleteContainerCommand.isForce()); - - assertUnderReplicatedCount(1); - // Update RM with the result of delete command - containerStateManager.removeContainerReplica( - container.containerID(), unhealthy); - - // Second RM iteration - // Now that we have a free DN, a quasi_closed replica should be replicated - assertReplicaScheduled(1); - assertUnderReplicatedCount(1); - - // Process the replicate command and report the replica back to SCM. - List replicateCommands = - datanodeCommandHandler.getReceivedCommands().stream() - .filter(command -> command.getCommand().getType() - .equals(SCMCommandProto.Type.replicateContainerCommand)) - .collect(Collectors.toList()); - assertEquals(1, replicateCommands.size()); - ReplicateContainerCommand command = (ReplicateContainerCommand) - replicateCommands.iterator().next().getCommand(); - List sources = command.getSourceDatanodes(); - assertThat(sources).contains(quasi1.getDatanodeDetails(), quasi2.getDatanodeDetails()); - ContainerReplica replica3 = - getReplicas(container.containerID(), QUASI_CLOSED, - container.getSequenceId(), quasi1.getOriginDatanodeId(), - MockDatanodeDetails.randomDatanodeDetails()); - containerStateManager.updateContainerReplica(container.containerID(), - replica3); - - // Third RM iteration - assertReplicaScheduled(0); - assertUnderReplicatedCount(0); - assertOverReplicatedCount(0); - } - - /** - * $numReplicas unhealthy replicas. 
- * Expectation: The remaining replicas are scheduled. - */ - @ParameterizedTest - @ValueSource(ints = {1, 2}) - public void testUnderReplicatedWithOnlyUnhealthyReplicas(int numReplicas) - throws Exception { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - for (int i = 0; i < numReplicas; i++) { - addReplica(container, NodeStatus.inServiceHealthy(), UNHEALTHY); - } - int numReplicasNeeded = HddsProtos.ReplicationFactor.THREE_VALUE - - numReplicas; - assertReplicaScheduled(numReplicasNeeded); - assertUnderReplicatedCount(1); - } - - @ParameterizedTest - @EnumSource(value = LifeCycleState.class, - names = {"CLOSED", "QUASI_CLOSED"}) - public void testUnderReplicatedWithOnlyUnhealthyReplicasDecommission( - LifeCycleState state) - throws Exception { - final ContainerInfo container = createContainer(state); - for (int i = 0; i < 2; i++) { - addReplica(container, NodeStatus.inServiceHealthy(), UNHEALTHY); - } - addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), - UNHEALTHY); - assertReplicaScheduled(1); - assertUnderReplicatedCount(1); - // Run again, and there should be a pending add scheduled, so nothing - // else should get scheduled. - assertReplicaScheduled(0); - } - - @ParameterizedTest - @EnumSource(value = LifeCycleState.class, - names = {"CLOSED", "QUASI_CLOSED"}) - public void testOverReplicatedWithOnlyUnhealthyReplicas( - LifeCycleState state) throws Exception { - final ContainerInfo container = createContainer(state); - for (int i = 0; i < 4; i++) { - addReplica(container, NodeStatus.inServiceHealthy(), UNHEALTHY); - } - assertDeleteScheduled(1); - assertUnderReplicatedCount(0); - assertOverReplicatedCount(1); - - // Run again, and there should be a pending delete scheduled, so nothing - // else should get scheduled. - assertDeleteScheduled(0); - } - - /** - * 1 unhealthy replica. - * 4 closed replicas. - * Expectation: - * Iteration 1: The unhealthy replica should be deleted. - * Iteration 2: One of the closed replicas should be deleted. - */ - @Test - public void testOverReplicatedClosedAndUnhealthy() throws Exception { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - ContainerReplica unhealthy = addReplica(container, - NodeStatus.inServiceHealthy(), UNHEALTHY); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - - // This method does one run of replication manager. - assertReplicaScheduled(0); - assertUnderReplicatedCount(0); - boolean unhealthyDeleted = false; - boolean closedDeleted = false; - - for (CommandForDatanode command : - datanodeCommandHandler.getReceivedCommands()) { - if (command.getCommand().getType() == - SCMCommandProto.Type.deleteContainerCommand) { - if (command.getDatanodeId() == - unhealthy.getDatanodeDetails().getUuid()) { - unhealthyDeleted = true; - } else { - closedDeleted = true; - } - } - } - - assertTrue(unhealthyDeleted); - assertFalse(closedDeleted); - - containerStateManager.removeContainerReplica( - container.containerID(), unhealthy); - - // Do a second run. 
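The deletion order that the over-replication tests here rely on can be sketched like this (simplified stand-in types, not the real over-replication handler): UNHEALTHY replicas are removed first, and a healthy excess copy is only removed once no unhealthy replica remains.

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

class OverReplicationSketch {
  enum State { QUASI_CLOSED, CLOSED, UNHEALTHY }

  static final class Replica {
    final String datanode;
    final State state;
    Replica(String datanode, State state) {
      this.datanode = datanode;
      this.state = state;
    }
  }

  // Pick at most one replica to delete per iteration, preferring UNHEALTHY
  // replicas; a healthy excess copy is only chosen once no unhealthy replica
  // is left, which is why the excess CLOSED copy above is deleted on the
  // second run only.
  static Optional<Replica> chooseDeleteTarget(List<Replica> replicas,
      int replicationFactor) {
    if (replicas.size() <= replicationFactor) {
      return Optional.empty();
    }
    return replicas.stream()
        .min(Comparator.comparing((Replica r) -> r.state != State.UNHEALTHY));
  }
}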
- assertReplicaScheduled(0); - assertUnderReplicatedCount(0); - for (CommandForDatanode command : - datanodeCommandHandler.getReceivedCommands()) { - if (command.getCommand().getType() == - SCMCommandProto.Type.deleteContainerCommand) { - if (command.getDatanodeId() == - unhealthy.getDatanodeDetails().getUuid()) { - unhealthyDeleted = true; - } else { - closedDeleted = true; - } - } - } - - assertTrue(unhealthyDeleted); - assertTrue(closedDeleted); - } - - /** - * 4 unhealthy replicas. - * Expectation: One unhealthy replica should be deleted. - */ - @Test - public void testOverReplicatedUnhealthy() throws Exception { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - Set unhealthyContainerDNIDs = new HashSet<>(); - - final int numReplicas = 4; - for (int i = 0; i < numReplicas; i++) { - ContainerReplica replica = addReplica(container, - NodeStatus.inServiceHealthy(), UNHEALTHY); - unhealthyContainerDNIDs.add(replica.getDatanodeDetails().getUuid()); - } - - // No replications should be scheduled. - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(0, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertUnderReplicatedCount(0); - - // One replica should be deleted. - assertEquals(1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - assertTrue( - datanodeCommandHandler.getReceivedCommands().stream() - .anyMatch(c -> c.getCommand().getType() == - SCMCommandProto.Type.deleteContainerCommand && - unhealthyContainerDNIDs.contains(c.getDatanodeId()))); - } - - /** - * 4 quasi-closed replicas. - * All have same origin node ID. - * Expectation: One of the replicas is deleted. - */ - @Test - public void testOverReplicatedQuasiClosedContainer() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - container.setUsedBytes(101); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaFour = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica( - id, replicaThree); - containerStateManager.updateContainerReplica(id, replicaFour); - - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentDeleteCommandCount + 1, - datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand)); - assertEquals(currentDeleteCommandCount + 1, - replicationManager.getMetrics().getDeletionCmdsSentTotal()); - assertEquals(1, getInflightCount(InflightType.DELETION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightDeletion()); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, 
report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.OVER_REPLICATED)); - - // Now we remove the replica according to inflight - DatanodeDetails targetDn = - replicationManager.getLegacyReplicationManager() - .getFirstDatanode(InflightType.DELETION, id); - if (targetDn.equals(replicaOne.getDatanodeDetails())) { - containerStateManager.removeContainerReplica( - id, replicaOne); - } else if (targetDn.equals(replicaTwo.getDatanodeDetails())) { - containerStateManager.removeContainerReplica( - id, replicaTwo); - } else if (targetDn.equals(replicaThree.getDatanodeDetails())) { - containerStateManager.removeContainerReplica( - id, replicaThree); - } else if (targetDn.equals(replicaFour.getDatanodeDetails())) { - containerStateManager.removeContainerReplica( - id, replicaFour); - } - - final long currentDeleteCommandCompleted = replicationManager.getMetrics() - .getReplicasDeletedTotal(); - final long deleteBytesCompleted = - replicationManager.getMetrics().getDeletionBytesCompletedTotal(); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(0, getInflightCount(InflightType.DELETION)); - assertEquals(0, replicationManager.getMetrics() - .getInflightDeletion()); - assertEquals(currentDeleteCommandCompleted + 1, - replicationManager.getMetrics().getReplicasDeletedTotal()); - assertEquals(deleteBytesCompleted + 101, - replicationManager.getMetrics().getDeletionBytesCompletedTotal()); - - report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.OVER_REPLICATED)); - } - - /** - * 2 open replicas. - * 1 unhealthy replica. - * Expectation: Container is closed. - * - * ReplicationManager should close the unhealthy OPEN container. - */ - @Test - public void testUnhealthyOpenContainer() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.OPEN); - final ContainerID id = container.containerID(); - final Set replicas = getReplicas(id, State.OPEN, - randomDatanodeDetails(), - randomDatanodeDetails()); - replicas.addAll(getReplicas(id, UNHEALTHY, randomDatanodeDetails())); - - containerStateManager.addContainer(container.getProtobuf()); - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - final CloseContainerEventHandler closeContainerHandler = mock(CloseContainerEventHandler.class); - eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler); - - replicationManager.processAll(); - eventQueue.processAll(1000); - verify(closeContainerHandler, times(1)).onMessage(id, eventQueue); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.OPEN)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.OPEN_UNHEALTHY)); - } - - /** - * 1 unhealthy replica. - * 2 open replicas. - * Expectation: Close command should be sent to open replicas only. - * - * ReplicationManager should skip send close command to unhealthy replica. 
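As asserted by the test that follows, close commands are only addressed to replicas that can still be closed; a minimal sketch of that filter (stand-in types, not the actual code) is shown below.

import java.util.List;
import java.util.stream.Collectors;

class CloseTargetSketch {
  enum State { OPEN, QUASI_CLOSED, CLOSED, UNHEALTHY }

  static final class Replica {
    final String datanode;
    final State state;
    Replica(String datanode, State state) {
      this.datanode = datanode;
      this.state = state;
    }
  }

  // UNHEALTHY replicas are skipped when closing a CLOSING container, so the
  // test below expects exactly two close commands for its two OPEN replicas.
  static List<String> closeTargets(List<Replica> replicas) {
    return replicas.stream()
        .filter(r -> r.state == State.OPEN)
        .map(r -> r.datanode)
        .collect(Collectors.toList());
  }
}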
- */ - @Test - public void testCloseUnhealthyReplica() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSING); - final ContainerID id = container.containerID(); - final Set replicas = getReplicas(id, UNHEALTHY, - randomDatanodeDetails()); - replicas.addAll(getReplicas(id, State.OPEN, randomDatanodeDetails())); - replicas.addAll(getReplicas(id, State.OPEN, randomDatanodeDetails())); - - containerStateManager.addContainer(container.getProtobuf()); - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - replicationManager.processAll(); - // Wait for EventQueue to call the event handler - eventQueue.processAll(1000); - assertEquals(2, - datanodeCommandHandler.getInvocation()); - } - - /** - * 1 unhealthy replica. - * 3 quasi closed replicas. - * All have same origin node ID. - * Expectation: Unhealthy replica is deleted. - * - * When a QUASI_CLOSED container is over replicated, ReplicationManager - * deletes the excess replicas. While choosing the replica for deletion - * ReplicationManager should prioritize deleting the unhealthy replica over - * QUASI_CLOSED replica if the unhealthy replica does not have a unique - * origin node ID. - */ - @Test - public void testQuasiClosedContainerWithExtraUnhealthyReplica() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica unhealthyReplica = getReplicas( - id, UNHEALTHY, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaFour = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, unhealthyReplica); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica(id, replicaThree); - containerStateManager.updateContainerReplica(id, replicaFour); - - assertDeleteScheduled(1); - assertTrue( - datanodeCommandHandler.getReceivedCommands().stream() - .anyMatch(c -> c.getCommand().getType() == - SCMCommandProto.Type.deleteContainerCommand && - c.getDatanodeId().equals( - unhealthyReplica.getDatanodeDetails().getUuid()))); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - // Container should have been considered over replicated including the - // unhealthy replica. - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.OVER_REPLICATED)); - - final long currentDeleteCommandCompleted = replicationManager.getMetrics() - .getReplicasDeletedTotal(); - // Now we remove the replica to simulate deletion complete - containerStateManager.removeContainerReplica(id, unhealthyReplica); - - // On the next run, the over replicated status should be reconciled and - // the delete completed. 
- replicationManager.processAll(); - eventQueue.processAll(1000); - - assertEquals(currentDeleteCommandCompleted + 1, - replicationManager.getMetrics().getReplicasDeletedTotal()); - assertEquals(0, getInflightCount(InflightType.DELETION)); - assertEquals(0, replicationManager.getMetrics() - .getInflightDeletion()); - - report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.OVER_REPLICATED)); - } - - /** - * 2 quasi-closed replicas. - * Expectation: Replicate one of the replicas. - */ - @Test - public void testUnderReplicatedQuasiClosedContainer() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - container.setUsedBytes(100); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - - final int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - final long currentBytesToReplicate = replicationManager.getMetrics() - .getReplicationBytesTotal(); - - // On the first iteration, one of the quasi closed replicas should be - // replicated. - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentReplicateCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(currentReplicateCommandCount + 1, - replicationManager.getMetrics().getReplicationCmdsSentTotal()); - assertEquals(currentBytesToReplicate + 100, - replicationManager.getMetrics().getReplicationBytesTotal()); - assertEquals(1, getInflightCount(InflightType.REPLICATION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightReplication()); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - - final long currentReplicateCommandCompleted = replicationManager - .getMetrics().getReplicasCreatedTotal(); - final long currentReplicateBytesCompleted = replicationManager - .getMetrics().getReplicationBytesCompletedTotal(); - - // Now we add the replicated new replica - DatanodeDetails targetDn = - replicationManager.getLegacyReplicationManager() - .getFirstDatanode(InflightType.REPLICATION, id); - final ContainerReplica replicatedReplicaThree = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, targetDn); - containerStateManager.updateContainerReplica( - id, replicatedReplicaThree); - - // On the next run, no additional replications should be scheduled. 
- replicationManager.processAll(); - eventQueue.processAll(1000); - - assertEquals(currentReplicateCommandCompleted + 1, - replicationManager.getMetrics().getReplicasCreatedTotal()); - assertEquals(currentReplicateBytesCompleted + 100, - replicationManager.getMetrics().getReplicationBytesCompletedTotal()); - assertEquals(0, getInflightCount(InflightType.REPLICATION)); - assertEquals(0, replicationManager.getMetrics() - .getInflightReplication()); - - report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - } - - /** - * 1 quasi-closed replica. - * 1 unhealthy replica. - * All have same origin node ID. - * Expectation: - * - * In the first iteration of ReplicationManager, it should re-replicate - * the quasi closed replicas so that there are 3 of them. - * - * In the second iteration, ReplicationManager should delete the unhealthy - * replica since its origin node ID is not unique. - */ - @Test - public void testUnderReplicatedQuasiClosedContainerWithUnhealthyReplica() - throws IOException, InterruptedException, - TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - container.setUsedBytes(99); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, - randomDatanodeDetails()); - final ContainerReplica unhealthyReplica = getReplicas( - id, UNHEALTHY, 1000L, originNodeId, - randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, unhealthyReplica); - - final int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - final long currentBytesToDelete = replicationManager.getMetrics() - .getDeletionBytesTotal(); - - // Run first iteraiton - - replicationManager.processAll(); - GenericTestUtils.waitFor( - () -> (currentReplicateCommandCount + 2) == datanodeCommandHandler - .getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand), - 50, 5000); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, - report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.UNHEALTHY)); - - List replicateCommands = datanodeCommandHandler - .getReceivedCommands().stream() - .filter(c -> c.getCommand().getType() - .equals(SCMCommandProto.Type.replicateContainerCommand)) - .collect(Collectors.toList()); - - assertEquals(2, replicateCommands.size()); - - // Report the two new replicas to SCM. 
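The "report the new replicas back to SCM" loop that follows is repeated in several tests in this class; it could in principle be factored into a helper along these lines. This is a hypothetical sketch only: completeReplication does not exist in the test class, while getReplicas, createDatanodeDetails, datanodeCommandHandler and containerStateManager are the test's own members.

// Hypothetical helper, not part of the original test class: simulate the
// datanodes completing the replicate commands from the previous RM iteration
// by registering a new replica for each command target.
private void completeReplication(ContainerID id, UUID originNodeId)
    throws IOException {
  List<CommandForDatanode> replicateCommands = datanodeCommandHandler
      .getReceivedCommands().stream()
      .filter(c -> c.getCommand().getType()
          == SCMCommandProto.Type.replicateContainerCommand)
      .collect(Collectors.toList());
  for (CommandForDatanode replicateCommand : replicateCommands) {
    DatanodeDetails newNode = createDatanodeDetails(
        replicateCommand.getDatanodeId());
    containerStateManager.updateContainerReplica(id,
        getReplicas(id, QUASI_CLOSED, 1000L, originNodeId, newNode));
  }
}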
- for (CommandForDatanode replicateCommand: replicateCommands) { - DatanodeDetails newNode = createDatanodeDetails( - replicateCommand.getDatanodeId()); - ContainerReplica newReplica = getReplicas( - id, QUASI_CLOSED, 1000L, originNodeId, newNode); - containerStateManager.updateContainerReplica(id, newReplica); - } - - // Run second iteration. - // Now that the quasi closed replica is sufficiently replicated, SCM - // should delete the unhealthy replica on the next iteration. - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentDeleteCommandCount + 1, - datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand)); - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - unhealthyReplica.getDatanodeDetails())); - assertEquals(currentDeleteCommandCount + 1, - replicationManager.getMetrics().getDeletionCmdsSentTotal()); - assertEquals(currentBytesToDelete + 99, - replicationManager.getMetrics().getDeletionBytesTotal()); - assertEquals(1, - getInflightCount(InflightType.DELETION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightDeletion()); - - containerStateManager.removeContainerReplica(id, unhealthyReplica); - - report = replicationManager.getContainerReport(); - assertEquals(1, - report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.UNHEALTHY)); - } - - /** - * If a QUASI_CLOSED container and a QUASI_CLOSED replica have the same - * sequence ID, it's not guaranteed that this is the latest sequence ID. - * Such a replica should not be closed. - */ - @Test - public void testQuasiClosedContainerAndReplicaWithSameSequenceID() - throws IOException, TimeoutException { - /* - Create an under replicated QUASI_CLOSED container with 2 QUASI_CLOSED - replicas. All have the container's sequence ID. - */ - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - container.setUsedBytes(100); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, container.getSequenceId(), originNodeId, - randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, QUASI_CLOSED, container.getSequenceId(), originNodeId, - randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - final int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - final long currentBytesToReplicate = replicationManager.getMetrics() - .getReplicationBytesTotal(); - - /* - One of the quasi closed replicas should be replicated and no close - commands should be sent. 
- */ - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentCloseCommandCount, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - - // One replication command should be sent - assertEquals(currentReplicateCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(currentReplicateCommandCount + 1, - replicationManager.getMetrics().getReplicationCmdsSentTotal()); - assertEquals( - currentBytesToReplicate + container.getUsedBytes(), - replicationManager.getMetrics().getReplicationBytesTotal()); - assertEquals(1, getInflightCount(InflightType.REPLICATION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightReplication()); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - } - - /** - * 3 quasi-closed replicas. - * All unique origin IDs. - * Expectation: Container is closed. - * - * When a container is QUASI_CLOSED and it has >50% of its replica - * in QUASI_CLOSED state with unique origin node id, - * ReplicationManager should force close the replica(s) with - * highest BCSID. - */ - @Test - public void testQuasiClosedToClosed() throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final Set replicas = getReplicas(id, QUASI_CLOSED, - randomDatanodeDetails(), - randomDatanodeDetails(), - randomDatanodeDetails()); - containerStateManager.addContainer(container.getProtobuf()); - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - - // All the replicas have same BCSID, so all of them will be closed. - assertEquals(currentCloseCommandCount + 3, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, - report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - } - - /** - * 2 quasi-closed replicas. - * 2 unique origin IDs with different BCSIDs. - * Expectation: Container is closed, then replicated on the next iteration. - */ - @Test - public void testCloseableIsClosedBeforeReplication() throws IOException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final ContainerReplica replicaOne = getReplicas( - id, QUASI_CLOSED, 1000L, UUID.randomUUID(), randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, QUASI_CLOSED, 900L, UUID.randomUUID(), randomDatanodeDetails()); - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - - // Since we have 2/3 replicas, the container should be closed on this - // iteration. 
The replica with the higher BCSID can be closed. - int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - - assertEquals(currentCloseCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - - // The highest BCSID replica should have been closed. - Optional cmd = - datanodeCommandHandler.getReceivedCommands().stream().findFirst(); - assertTrue(cmd.isPresent()); - assertEquals(replicaOne.getDatanodeDetails().getUuid(), - cmd.get().getDatanodeId()); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - // Container will register as closed after the closed replica is - // reported in the next iteration. - assertEquals(0, - report.getStat(LifeCycleState.CLOSED)); - // Legacy replication manager does not count over/under replicated - // status until the container is eligible to be replicated. - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - assertEquals(1, - report.getStat(LifeCycleState.QUASI_CLOSED)); - assertEquals(0, report.getStat( - ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); - - // Move the higher BCSID replica to closed state. - final ContainerReplica closedReplicaOne = getReplicas( - id, CLOSED, 1000L, replicaOne.getOriginDatanodeId(), - replicaOne.getDatanodeDetails()); - containerStateManager.updateContainerReplica(id, closedReplicaOne); - - // On the next iteration, the container should be replicated. - currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - // No more close commands should be sent. - assertEquals(currentCloseCommandCount, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.closeContainerCommand)); - // Two replicate commands should be triggered for the now closed - // container. - assertEquals(currentReplicateCommandCount + 2, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(currentReplicateCommandCount + 2, - replicationManager.getMetrics().getReplicationCmdsSentTotal()); - assertEquals(1, getInflightCount(InflightType.REPLICATION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightReplication()); - - // Once the replicas are closed, moving the container to CLOSED state - // in SCM is done on the container report, not in the replication manager. - } - } - - /** - * Tests replication manager handling of decommissioning and maintainence - * mode datanodes. - */ - @Nested - class DecomAndMaintenance { - /** - * ReplicationManager should replicate an additional replica if there are - * decommissioned replicas. 
- */ - @Test - public void testUnderReplicatedDueToOutOfService() - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); - assertReplicaScheduled(2); - assertUnderReplicatedCount(1); - } - - /** - * ReplicationManager should replicate an additional replica when all copies - * are decommissioning. - */ - @Test - public void testUnderReplicatedDueToAllDecommission() - throws IOException, TimeoutException { - runTestUnderReplicatedDueToAllDecommission(3); - } - - Void runTestUnderReplicatedDueToAllDecommission(int expectedReplication) - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); - assertReplicaScheduled(expectedReplication); - assertUnderReplicatedCount(1); - return null; - } - - @Test - public void testReplicationLimit() throws Exception { - runTestLimit(1, 0, 2, 0, - () -> runTestUnderReplicatedDueToAllDecommission(1)); - } - - /** - * ReplicationManager should not take any action when the container is - * correctly replicated with decommissioned replicas still present. - */ - @Test - public void testCorrectlyReplicatedWithDecommission() - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); - assertReplicaScheduled(0); - assertUnderReplicatedCount(0); - } - - /** - * ReplicationManager should replicate an additional replica when min rep - * is not met for maintenance. - */ - @Test - public void testUnderReplicatedDueToMaintenance() - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - assertReplicaScheduled(1); - assertUnderReplicatedCount(1); - } - - /** - * ReplicationManager should not replicate an additional replica when if - * min replica for maintenance is 1 and another replica is available. 
- */ - @Test - public void testNotUnderReplicatedDueToMaintenanceMinRepOne() - throws Exception { - replicationManager.stop(); - ReplicationManagerConfiguration newConf = newRMConfig(); - newConf.setMaintenanceReplicaMinimum(1); - dbStore.close(); - createReplicationManager(newConf); - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - assertReplicaScheduled(0); - assertUnderReplicatedCount(0); - } - - /** - * ReplicationManager should replicate an additional replica when all copies - * are going off line and min rep is 1. - */ - @Test - public void testUnderReplicatedDueToMaintenanceMinRepOne() - throws Exception { - replicationManager.stop(); - ReplicationManagerConfiguration newConf = newRMConfig(); - newConf.setMaintenanceReplicaMinimum(1); - dbStore.close(); - createReplicationManager(newConf); - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - assertReplicaScheduled(1); - assertUnderReplicatedCount(1); - } - - /** - * ReplicationManager should replicate additional replica when all copies - * are going into maintenance. - */ - @Test - public void testUnderReplicatedDueToAllMaintenance() - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - assertReplicaScheduled(2); - assertUnderReplicatedCount(1); - } - - /** - * ReplicationManager should not replicate additional replica sufficient - * replica are available. - */ - @Test - public void testCorrectlyReplicatedWithMaintenance() - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - assertReplicaScheduled(0); - assertUnderReplicatedCount(0); - } - - /** - * ReplicationManager should replicate additional replica when all copies - * are decommissioning or maintenance. - */ - @Test - public void testUnderReplicatedWithDecommissionAndMaintenance() - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONED, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONED, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - assertReplicaScheduled(2); - assertUnderReplicatedCount(1); - } - - /** - * When a CLOSED container is over replicated, ReplicationManager - * deletes the excess replicas. 
While choosing the replica for deletion - * ReplicationManager should not attempt to remove a DECOMMISSION or - * MAINTENANCE replica. - */ - @Test - public void testOverReplicatedClosedContainerWithDecomAndMaint() - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONED, HEALTHY), CLOSED); - addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - addReplica(container, NodeStatus.inServiceHealthy(), CLOSED); - - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentDeleteCommandCount + 2, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - assertEquals(currentDeleteCommandCount + 2, - replicationManager.getMetrics().getDeletionCmdsSentTotal()); - assertEquals(1, getInflightCount(InflightType.DELETION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightDeletion()); - // Get the DECOM and Maint replica and ensure none of them are scheduled - // for removal - Set decom = - containerStateManager.getContainerReplicas( - container.containerID()) - .stream() - .filter(r -> - r.getDatanodeDetails().getPersistedOpState() != IN_SERVICE) - .collect(Collectors.toSet()); - for (ContainerReplica r : decom) { - assertFalse(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - r.getDatanodeDetails())); - } - assertOverReplicatedCount(1); - } - - /** - * Replication Manager should not attempt to replicate from an unhealthy - * (stale or dead) node. To test this, setup a scenario where a replia needs - * to be created, but mark all nodes stale. That way, no new replica will be - * scheduled. - */ - @Test - public void testUnderReplicatedNotHealthySource() - throws IOException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - addReplica(container, NodeStatus.inServiceStale(), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONED, STALE), CLOSED); - addReplica(container, new NodeStatus(DECOMMISSIONED, STALE), CLOSED); - // There should be replica scheduled, but as all nodes are stale, nothing - // gets scheduled. - assertReplicaScheduled(0); - assertUnderReplicatedCount(1); - } - } - - private static ReplicationManagerConfiguration newRMConfig() { - ReplicationManagerConfiguration conf = - new ReplicationManagerConfiguration(); - conf.setEnableLegacy(true); - return conf; - } - - /** - * Tests replication manager move command. - */ - @Nested - class Move { - /** - * if all the prerequisites are satisfied, move should work as expected. 
- */ - @Test - public void testMove() throws IOException, NodeNotFoundException, - InterruptedException, ExecutionException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - ContainerID id = container.containerID(); - ContainerReplica dn1 = addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - DatanodeDetails dn3 = addNode(new NodeStatus(IN_SERVICE, HEALTHY)); - CompletableFuture cf = - replicationManager.move(id, dn1.getDatanodeDetails(), dn3); - assertThat(scmLogs.getOutput()).contains( - "receive a move request about container"); - Thread.sleep(100L); - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.replicateContainerCommand, dn3)); - assertEquals(1, datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - //replicate container to dn3 - addReplicaToDn(container, dn3, CLOSED); - replicationManager.processAll(); - eventQueue.processAll(1000); - - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - dn1.getDatanodeDetails())); - assertEquals(1, datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - containerStateManager.removeContainerReplica(id, dn1); - - replicationManager.processAll(); - eventQueue.processAll(1000); - - assertTrue( - cf.isDone() && cf.get() == MoveManager.MoveResult.COMPLETED); - } - - /** - * if crash happened and restarted, move option should work as expected. - */ - @Test - public void testMoveCrashAndRestart() throws IOException, - NodeNotFoundException, InterruptedException, TimeoutException { - final ContainerInfo container = createContainer(LifeCycleState.CLOSED); - ContainerID id = container.containerID(); - ContainerReplica dn1 = addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); - DatanodeDetails dn3 = addNode(new NodeStatus(IN_SERVICE, HEALTHY)); - replicationManager.move(id, dn1.getDatanodeDetails(), dn3); - assertThat(scmLogs.getOutput()).contains( - "receive a move request about container"); - Thread.sleep(100L); - assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.replicateContainerCommand, dn3)); - assertEquals(1, datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - //crash happens, restart scm. - //clear current inflight actions and reload inflightMove from DBStore. 
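/*
 * Sketch of the mechanism this test exercises (hypothetical, simplified types,
 * not the real MoveScheduler API): an in-flight move is persisted as a
 * (src, tgt) pair keyed by container ID, so after an SCM crash the scheduler
 * can be rebuilt from the stored table rather than from in-memory state.
 */
import java.util.HashMap;
import java.util.Map;

final class InflightMoveTable {
  record MovePair(String srcNode, String tgtNode) { }

  private final Map<Long, MovePair> table = new HashMap<>();

  void startMove(long containerId, String src, String tgt) {
    table.put(containerId, new MovePair(src, tgt));  // persisted before commands go out
  }

  void completeMove(long containerId) {
    table.remove(containerId);                       // cleared only when the move finishes
  }

  Map<Long, MovePair> reload() {
    return new HashMap<>(table);                     // what reinitialize() conceptually does
  }
}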
- resetReplicationManager(); - replicationManager.getMoveScheduler() - .reinitialize(SCMDBDefinition.MOVE.getTable(dbStore)); - assertTrue(replicationManager.getMoveScheduler() - .getInflightMove().containsKey(id)); - MoveDataNodePair kv = replicationManager.getMoveScheduler() - .getInflightMove().get(id); - assertEquals(kv.getSrc(), dn1.getDatanodeDetails()); - assertEquals(kv.getTgt(), dn3); - serviceManager.notifyStatusChanged(); - - Thread.sleep(100L); - // now, the container is not over-replicated, - // so no deleteContainerCommand will be sent - assertFalse(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - dn1.getDatanodeDetails())); - //replica does not exist in target datanode, so a - // replicateContainerCommand will be sent again at - // notifyStatusChanged#onLeaderReadyAndOutOfSafeMode - assertEquals(2, datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - - //replicate container to dn3, now, over-replicated - addReplicaToDn(container, dn3, CLOSED); - replicationManager.processAll(); - eventQueue.processAll(1000); - - //deleteContainerCommand is sent, but the src replica is not deleted now - assertEquals(1, datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - - //crash happens, restart scm. - //clear current inflight actions and reload inflightMove from DBStore. - resetReplicationManager(); - replicationManager.getMoveScheduler() - .reinitialize(SCMDBDefinition.MOVE.getTable(dbStore)); - assertTrue(replicationManager.getMoveScheduler() - .getInflightMove().containsKey(id)); - kv = replicationManager.getMoveScheduler() - .getInflightMove().get(id); - assertEquals(kv.getSrc(), dn1.getDatanodeDetails()); - assertEquals(kv.getTgt(), dn3); - serviceManager.notifyStatusChanged(); - - //after restart and the container is over-replicated now, - //deleteContainerCommand will be sent again - assertEquals(2, datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - containerStateManager.removeContainerReplica(id, dn1); - - //replica in src datanode is deleted now - containerStateManager.removeContainerReplica(id, dn1); - replicationManager.processAll(); - eventQueue.processAll(1000); - - //since the move is complete,so after scm crash and restart - //inflightMove should not contain the container again - resetReplicationManager(); - replicationManager.getMoveScheduler() - .reinitialize(SCMDBDefinition.MOVE.getTable(dbStore)); - assertThat(replicationManager.getMoveScheduler() - .getInflightMove()).doesNotContainKey(id); - - //completeableFuture is not stored in DB, so after scm crash and - //restart ,completeableFuture is missing - } - - /** - * make sure RM does not delete replica if placement policy is not - * satisfied. 
-   */
-  @Test
-  public void testMoveNotDeleteSrcIfPolicyNotSatisfied()
-      throws IOException, NodeNotFoundException,
-      InterruptedException, ExecutionException, TimeoutException {
-    final ContainerInfo container = createContainer(LifeCycleState.CLOSED);
-    ContainerID id = container.containerID();
-    ContainerReplica dn1 = addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-    ContainerReplica dn2 = addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-    addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-    DatanodeDetails dn4 = addNode(new NodeStatus(IN_SERVICE, HEALTHY));
-    CompletableFuture<MoveManager.MoveResult> cf =
-        replicationManager.move(id, dn1.getDatanodeDetails(), dn4);
-    assertThat(scmLogs.getOutput()).contains(
-        "receive a move request about container");
-    Thread.sleep(100L);
-    assertTrue(datanodeCommandHandler.received(
-        SCMCommandProto.Type.replicateContainerCommand, dn4));
-    assertEquals(1, datanodeCommandHandler.getInvocationCount(
-        SCMCommandProto.Type.replicateContainerCommand));
-
-    //replicate container to dn4
-    addReplicaToDn(container, dn4, CLOSED);
-    //now, replication succeeds, but the replica on dn2 is lost,
-    //and there are only three replicas in total, so RM should
-    //not delete the replica on dn1
-    containerStateManager.removeContainerReplica(id, dn2);
-    replicationManager.processAll();
-    eventQueue.processAll(1000);
-
-    assertFalse(
-        datanodeCommandHandler.received(
-            SCMCommandProto.Type.deleteContainerCommand,
-            dn1.getDatanodeDetails()));
-
-    assertTrue(cf.isDone() &&
-        cf.get() == MoveManager.MoveResult.DELETE_FAIL_POLICY);
-  }
-
-
-  /**
-   * Test the case where the source and target datanodes become unhealthy
-   * while moving.
-   */
-  @Test
-  public void testDnBecameUnhealthyWhenMoving() throws IOException,
-      NodeNotFoundException, InterruptedException, ExecutionException,
-      TimeoutException {
-    final ContainerInfo container = createContainer(LifeCycleState.CLOSED);
-    ContainerID id = container.containerID();
-    ContainerReplica dn1 = addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-    addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-    addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-    DatanodeDetails dn3 = addNode(new NodeStatus(IN_SERVICE, HEALTHY));
-    CompletableFuture<MoveManager.MoveResult> cf =
-        replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
-    assertThat(scmLogs.getOutput()).contains(
-        "receive a move request about container");
-
-    nodeManager.setNodeStatus(dn3, new NodeStatus(IN_SERVICE, STALE));
-    replicationManager.processAll();
-    eventQueue.processAll(1000);
-
-    assertTrue(cf.isDone() && cf.get() ==
-        MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
-
-    nodeManager.setNodeStatus(dn3, new NodeStatus(IN_SERVICE, HEALTHY));
-    cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
-    addReplicaToDn(container, dn3, CLOSED);
-    replicationManager.processAll();
-    eventQueue.processAll(1000);
-    nodeManager.setNodeStatus(dn1.getDatanodeDetails(),
-        new NodeStatus(IN_SERVICE, STALE));
-    replicationManager.processAll();
-    eventQueue.processAll(1000);
-
-    assertTrue(cf.isDone() && cf.get() ==
-        MoveManager.MoveResult.DELETION_FAIL_NODE_UNHEALTHY);
-  }
-
-  /**
-   * Before ReplicationManager generates a CompletableFuture for a move
-   * operation, some prerequisites must be satisfied.
-   */
-  @Test
-  public void testMovePrerequisites() throws IOException,
-      NodeNotFoundException, InterruptedException, ExecutionException,
-      InvalidStateTransitionException, TimeoutException {
-    //all conditions are met
-    final ContainerInfo container = createContainer(LifeCycleState.OPEN);
-    ContainerID id = container.containerID();
-    ContainerReplica dn1 = addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-    ContainerReplica dn2 = addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-    DatanodeDetails dn3 = addNode(new NodeStatus(IN_SERVICE, HEALTHY));
-    ContainerReplica dn4 = addReplica(container,
-        new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
-
-    CompletableFuture<MoveManager.MoveResult> cf;
-    //the above move is executed successfully, so there may be some items in
-    //inflightReplication or inflightDeletion. Here we stop the replication
-    //manager to clear these states, which may impact the tests below.
-    //We don't need a running replicationManager now.
-    replicationManager.stop();
-    Thread.sleep(100L);
-    cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
-    assertTrue(cf.isDone() && cf.get() ==
-        MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR);
-    replicationManager.start();
-    Thread.sleep(100L);
-
-    //container is not yet in CLOSED state
-    cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
-    assertTrue(cf.isDone() && cf.get() ==
-        MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
-    //open -> closing
-    containerStateManager.updateContainerState(id.getProtobuf(),
-        LifeCycleEvent.FINALIZE);
-    cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
-    assertTrue(cf.isDone() && cf.get() ==
-        MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
-    //closing -> quasi_closed
-    containerStateManager.updateContainerState(id.getProtobuf(),
-        LifeCycleEvent.QUASI_CLOSE);
-    cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
-    assertTrue(cf.isDone() && cf.get() ==
-        MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
-
-    //quasi_closed -> closed
-    containerStateManager.updateContainerState(id.getProtobuf(),
-        LifeCycleEvent.FORCE_CLOSE);
-    assertSame(LifeCycleState.CLOSED,
-        containerStateManager.getContainer(id).getState());
-
-    //Node is not in healthy state
-    for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) {
-      if (state != HEALTHY) {
-        nodeManager.setNodeStatus(dn3,
-            new NodeStatus(IN_SERVICE, state));
-        cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
-        assertTrue(cf.isDone() && cf.get() ==
-            MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
-        cf = replicationManager.move(id, dn3, dn1.getDatanodeDetails());
-        assertTrue(cf.isDone() && cf.get() ==
-            MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
-      }
-    }
-    nodeManager.setNodeStatus(dn3, new NodeStatus(IN_SERVICE, HEALTHY));
-
-    //Node is not in IN_SERVICE state
-    for (HddsProtos.NodeOperationalState state :
-        HddsProtos.NodeOperationalState.values()) {
-      if (state != IN_SERVICE) {
-        nodeManager.setNodeStatus(dn3,
-            new NodeStatus(state, HEALTHY));
-        cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
-        assertTrue(cf.isDone() && cf.get() ==
-            MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
-        cf = replicationManager.move(id, dn3, dn1.getDatanodeDetails());
-        assertTrue(cf.isDone() && cf.get() ==
-            MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
-      }
-    }
-    nodeManager.setNodeStatus(dn3, new NodeStatus(IN_SERVICE, HEALTHY));
-
-    //container exists in target datanode
-    cf =
replicationManager.move(id, dn1.getDatanodeDetails(), - dn2.getDatanodeDetails()); - assertTrue(cf.isDone() && cf.get() == - MoveManager.MoveResult.REPLICATION_FAIL_EXIST_IN_TARGET); - - //container does not exist in source datanode - cf = replicationManager.move(id, dn3, dn3); - assertTrue(cf.isDone() && cf.get() == - MoveManager.MoveResult.REPLICATION_FAIL_NOT_EXIST_IN_SOURCE); - - //make container over relplicated to test the - // case that container is in inflightDeletion - ContainerReplica dn5 = addReplica(container, - new NodeStatus(IN_SERVICE, HEALTHY), State.CLOSED); - replicationManager.processAll(); - //waiting for inflightDeletion generation - eventQueue.processAll(1000); - cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3); - assertTrue(cf.isDone() && cf.get() == - MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_DELETION); - resetReplicationManager(); - - //make the replica num be 2 to test the case - //that container is in inflightReplication - containerStateManager.removeContainerReplica(id, dn5); - containerStateManager.removeContainerReplica(id, dn4); - //replication manager should generate inflightReplication - replicationManager.processAll(); - //waiting for inflightReplication generation - eventQueue.processAll(1000); - cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3); - assertTrue(cf.isDone() && cf.get() == - MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_REPLICATION); - } - } - - /** - * Tests mis-replicated containers with rack topology information. - */ - @Nested - class MisReplicated { - - @Test - public void additionalReplicaScheduledWhenMisReplicated() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSED); - container.setUsedBytes(100); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica( - id, replicaThree); - - // Ensure a mis-replicated status is returned for any containers in this - // test where there are 3 replicas. When there are 2 or 4 replicas - // the status returned will be healthy. - when(ratisContainerPlacementPolicy.validateContainerPlacement( - argThat(list -> list.size() == 3), - anyInt() - )).thenAnswer(invocation -> { - return new ContainerPlacementStatusDefault(1, 2, 3); - }); - - int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - final long currentBytesToReplicate = replicationManager.getMetrics() - .getReplicationBytesTotal(); - - replicationManager.processAll(); - eventQueue.processAll(1000); - // At this stage, due to the mocked calls to validateContainerPlacement - // the policy will not be satisfied, and replication will be triggered. 
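/*
 * Assumed reading of the mocked placement status above (first argument = racks
 * currently used, second = racks required): placement is satisfied only when
 * the replicas span at least the required number of racks. A minimal model in
 * plain Java, not the real PlacementPolicy API.
 */
import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class RackPlacementCheck {
  /** replicaRacks holds the rack id of each replica. */
  static boolean isPolicySatisfied(List<String> replicaRacks, int requiredRacks) {
    Set<String> distinctRacks = new HashSet<>(replicaRacks);
    // Cannot require more racks than there are replicas.
    return distinctRacks.size() >= Math.min(requiredRacks, replicaRacks.size());
  }
}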
- - assertEquals(currentReplicateCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(currentReplicateCommandCount + 1, - replicationManager.getMetrics().getReplicationCmdsSentTotal()); - assertEquals(currentBytesToReplicate + 100, - replicationManager.getMetrics().getReplicationBytesTotal()); - assertEquals(1, getInflightCount(InflightType.REPLICATION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightReplication()); - - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(1, report.getStat(LifeCycleState.CLOSED)); - assertEquals(1, report.getStat( - ReplicationManagerReport.HealthState.MIS_REPLICATED)); - - // Now make it so that all containers seem mis-replicated no matter how - // many replicas. This will test replicas are not scheduled if the new - // replica does not fix the mis-replication. - when(ratisContainerPlacementPolicy.validateContainerPlacement( - anyList(), - anyInt() - )).thenAnswer(invocation -> { - return new ContainerPlacementStatusDefault(1, 2, 3); - }); - - currentReplicateCommandCount = datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - // At this stage, due to the mocked calls to validateContainerPlacement - // the mis-replicated racks will not have improved, so expect to see - // nothing scheduled. - assertEquals(currentReplicateCommandCount, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(currentReplicateCommandCount, - replicationManager.getMetrics().getReplicationCmdsSentTotal()); - assertEquals(1, getInflightCount(InflightType.REPLICATION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightReplication()); - } - - @Test - public void overReplicatedButRemovingMakesMisReplicated() - throws IOException, TimeoutException { - // In this test, the excess replica should not be removed. - final ContainerInfo container = getContainer(LifeCycleState.CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaFour = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica( - id, replicaThree); - containerStateManager.updateContainerReplica(id, replicaFour); - - // Ensure a mis-replicated status is returned for any containers in this - // test where there are exactly 3 replicas checked. 
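/*
 * Sketch of the guard this test exercises (hypothetical helper, plain Java):
 * an excess replica may only be removed if the remaining replicas would still
 * satisfy the placement policy, so over-replication is never traded for
 * mis-replication.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

final class SafeDeleteCheck {
  static <R> boolean canRemove(List<R> replicas, R candidate,
      Predicate<List<R>> placementSatisfied) {
    List<R> remaining = new ArrayList<>(replicas);
    remaining.remove(candidate);
    return placementSatisfied.test(remaining);
  }
}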
- when(ratisContainerPlacementPolicy.validateContainerPlacement( - argThat(list -> list.size() == 3), - anyInt() - )).thenAnswer( - invocation -> new ContainerPlacementStatusDefault(1, 2, 3)); - - int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - // TODO the new (non-legacy) RM needs a separate handler for - // topology status to make progress in this case by: - // 1. Deleting the closed replica to restore proper replica count. - // 2. Deleting the unhealthy replica since there are adequate healthy - // replicas. - // 3. Fixing topology issues left by the previous cleanup tasks. - // Current legacy RM implementation will take no action in this case - // because deletion would compromise topology. - assertEquals(currentDeleteCommandCount, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - assertEquals(currentDeleteCommandCount, - replicationManager.getMetrics().getDeletionCmdsSentTotal()); - - assertEquals(0, getInflightCount(InflightType.DELETION)); - assertEquals(0, replicationManager.getMetrics() - .getInflightDeletion()); - assertOverReplicatedCount(1); - } - - @Test - public void testOverReplicatedAndPolicySatisfied() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaFour = getReplicas( - id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.addContainer(container.getProtobuf()); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica( - id, replicaThree); - containerStateManager.updateContainerReplica(id, replicaFour); - - when(ratisContainerPlacementPolicy.validateContainerPlacement( - argThat(list -> list.size() == 3), - anyInt() - )).thenAnswer( - invocation -> new ContainerPlacementStatusDefault(2, 2, 3)); - - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentDeleteCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - assertEquals(currentDeleteCommandCount + 1, - replicationManager.getMetrics().getDeletionCmdsSentTotal()); - assertEquals(1, getInflightCount(InflightType.DELETION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightDeletion()); - - assertOverReplicatedCount(1); - } - - - @Test - @Unhealthy("This test doesn't properly test Rack Placement Policy as" + - " LegacyReplicationManager doesn't handle rack aware delete properly.") - public void testOverReplicatedAndPolicyUnSatisfiedAndDeleted() - throws IOException, TimeoutException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSED); - final ContainerID id = container.containerID(); - final 
UUID originNodeId = UUID.randomUUID();
-    final ContainerReplica replicaOne = getReplicas(
-        id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaTwo = getReplicas(
-        id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaThree = getReplicas(
-        id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaFour = getReplicas(
-        id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-    final ContainerReplica replicaFive = getReplicas(
-        id, QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
-
-    containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id, replicaOne);
-    containerStateManager.updateContainerReplica(id, replicaTwo);
-    containerStateManager.updateContainerReplica(
-        id, replicaThree);
-    containerStateManager.updateContainerReplica(id, replicaFour);
-    containerStateManager.updateContainerReplica(id, replicaFive);
-
-    when(ratisContainerPlacementPolicy.validateContainerPlacement(
-        argThat(list -> list != null && list.size() <= 4),
-        anyInt()
-    )).thenAnswer(
-        invocation -> new ContainerPlacementStatusDefault(1, 2, 3));
-
-    int currentDeleteCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
-
-    // On the first run, RM will delete one of the extra closed replicas.
-    replicationManager.processAll();
-    eventQueue.processAll(1000);
-    assertEquals(currentDeleteCommandCount + 1,
-        datanodeCommandHandler.getInvocationCount(
-            SCMCommandProto.Type.deleteContainerCommand));
-    assertEquals(currentDeleteCommandCount + 1,
-        replicationManager.getMetrics().getDeletionCmdsSentTotal());
-    assertEquals(1, getInflightCount(InflightType.DELETION));
-    assertEquals(1, replicationManager.getMetrics()
-        .getInflightDeletion());
-
-    assertAnyDeleteTargets(
-        replicaOne.getDatanodeDetails(),
-        replicaTwo.getDatanodeDetails(),
-        replicaThree.getDatanodeDetails(),
-        replicaFour.getDatanodeDetails()
-    );
-
-    currentDeleteCommandCount = datanodeCommandHandler
-        .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
-
-    // On the second run, the container is now properly replicated when
-    // counting in-flight deletes. This allows the quasi-closed replica to
-    // be deleted by the unhealthy container handler.
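/*
 * Simplified arithmetic behind "properly replicated when counting in-flight
 * deletes" (assumed bookkeeping, not the real Ozone classes): pending adds and
 * deletes are applied to the reported replica count before deciding whether
 * another command is needed.
 */
final class EffectiveReplicaCount {
  static int effective(int reported, int pendingAdds, int pendingDeletes) {
    return reported + pendingAdds - pendingDeletes;
  }

  public static void main(String[] args) {
    // 5 reported replicas with 1 delete already in flight count as 4, which
    // frees the next pass to remove the remaining unwanted replica.
    System.out.println(effective(5, 0, 1));  // prints 4
  }
}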
- replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentDeleteCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - assertEquals(currentDeleteCommandCount + 1, - replicationManager.getMetrics().getDeletionCmdsSentTotal()); - assertEquals(1, getInflightCount(InflightType.DELETION)); - assertEquals(1, replicationManager.getMetrics() - .getInflightDeletion()); - - assertDeleteTargetsContain(replicaFive.getDatanodeDetails()); - } - } - - void runTestLimit(int replicationLimit, int deletionLimit, - int expectedReplicationSkipped, int expectedDeletionSkipped, - Callable testcase) throws Exception { - createReplicationManager(replicationLimit, deletionLimit); - - final ReplicationManagerMetrics metrics = replicationManager.getMetrics(); - final long replicationSkipped = metrics.getInflightReplicationSkipped(); - final long deletionSkipped = metrics.getInflightDeletionSkipped(); - - testcase.call(); - - assertEquals(replicationSkipped + expectedReplicationSkipped, - metrics.getInflightReplicationSkipped()); - assertEquals(deletionSkipped + expectedDeletionSkipped, - metrics.getInflightDeletionSkipped()); - - //reset limits for other tests. - createReplicationManager(0, 0); - } - - /** - * Checks that the set of datanodes given delete commands exactly matches - * targetDNs. - */ - private void assertExactDeleteTargets(DatanodeDetails... targetDNs) { - List deleteCommands = datanodeCommandHandler - .getReceivedCommands().stream() - .filter(c -> c.getCommand().getType() == - SCMCommandProto.Type.deleteContainerCommand) - .collect(Collectors.toList()); - - assertEquals(targetDNs.length, deleteCommands.size()); - - Set targetDNIDs = Arrays.stream(targetDNs) - .map(DatanodeDetails::getUuid) - .collect(Collectors.toSet()); - Set chosenDNIDs = deleteCommands.stream() - .map(CommandForDatanode::getDatanodeId) - .collect(Collectors.toSet()); - - assertEquals(targetDNIDs, chosenDNIDs); - } - - /** - * Checks if the set of nodes with deletions scheduled were taken from the - * provided set of DNs. - */ - private void assertAnyDeleteTargets(DatanodeDetails... validDeleteDNs) { - List deleteCommands = datanodeCommandHandler - .getReceivedCommands().stream() - .filter(c -> c.getCommand().getType() == - SCMCommandProto.Type.deleteContainerCommand) - .collect(Collectors.toList()); - - Set deleteCandidateIDs = Arrays.stream(validDeleteDNs) - .map(DatanodeDetails::getUuid) - .collect(Collectors.toSet()); - Set chosenDNIDs = deleteCommands.stream() - .map(CommandForDatanode::getDatanodeId) - .collect(Collectors.toSet()); - - assertThat(deleteCandidateIDs).containsAll(chosenDNIDs); - } - - /** - * Checks if the set of nodes with deletions scheduled contains all of the - * provided DNs. - */ - private void assertDeleteTargetsContain(DatanodeDetails... 
deleteDN) { - List deleteCommands = datanodeCommandHandler - .getReceivedCommands().stream() - .filter(c -> c.getCommand().getType() == - SCMCommandProto.Type.deleteContainerCommand) - .collect(Collectors.toList()); - - Set deleteDNIDs = Arrays.stream(deleteDN) - .map(DatanodeDetails::getUuid) - .collect(Collectors.toSet()); - Set chosenDNIDs = deleteCommands.stream() - .map(CommandForDatanode::getDatanodeId) - .collect(Collectors.toSet()); - - assertThat(chosenDNIDs).containsAll(deleteDNIDs); - } - - private ContainerInfo createContainer(LifeCycleState containerState) - throws IOException, TimeoutException { - return createContainer(containerState, CONTAINER_USED_BYTES_DEFAULT, - CONTAINER_NUM_KEYS_DEFAULT); - } - - private ContainerInfo createContainer(LifeCycleState containerState, - long usedBytes, long numKeys) throws IOException, TimeoutException { - final ContainerInfo container = getContainer(containerState); - container.setUsedBytes(usedBytes); - container.setNumberOfKeys(numKeys); - containerStateManager.addContainer(container.getProtobuf()); - return container; - } - - private DatanodeDetails addNode(NodeStatus nodeStatus) { - DatanodeDetails dn = randomDatanodeDetails(); - dn.setPersistedOpState(nodeStatus.getOperationalState()); - dn.setPersistedOpStateExpiryEpochSec( - nodeStatus.getOpStateExpiryEpochSeconds()); - nodeManager.register(dn, nodeStatus); - return dn; - } - - private void resetReplicationManager() throws InterruptedException { - replicationManager.stop(); - Thread.sleep(100L); - replicationManager.start(); - Thread.sleep(100L); - } - - private ContainerReplica addReplica(ContainerInfo container, - NodeStatus nodeStatus, State replicaState) - throws ContainerNotFoundException { - DatanodeDetails dn = addNode(nodeStatus); - return addReplicaToDn(container, dn, replicaState); - } - - private ContainerReplica addReplica(ContainerInfo container, - NodeStatus nodeStatus, State replicaState, long usedBytes, long numOfKeys) - throws ContainerNotFoundException { - DatanodeDetails dn = addNode(nodeStatus); - return addReplicaToDn(container, dn, replicaState, usedBytes, numOfKeys); - } - - private ContainerReplica addReplicaToDn(ContainerInfo container, - DatanodeDetails dn, State replicaState) - throws ContainerNotFoundException { - return addReplicaToDn(container, dn, replicaState, 1000L); - } - - private ContainerReplica addReplicaToDn(ContainerInfo container, - DatanodeDetails dn, State replicaState, long bcsId) - throws ContainerNotFoundException { - // Using the same originID for all replica in the container set. If each - // replica has a unique originID, it causes problems in ReplicationManager - // when processing over-replicated containers. - final UUID originNodeId = getUUID(container.getContainerID()); - final ContainerReplica replica = getReplicas(container.containerID(), - replicaState, container.getUsedBytes(), container.getNumberOfKeys(), - bcsId, originNodeId, dn); - containerStateManager - .updateContainerReplica(container.containerID(), replica); - return replica; - } - - static UUID getUUID(long id) { - return UUID.nameUUIDFromBytes(LongCodec.get().toPersistedFormat(id)); - } - - private ContainerReplica addReplicaToDn(ContainerInfo container, - DatanodeDetails dn, State replicaState, long usedBytes, long numOfKeys) { - // Using the same originID for all replica in the container set. If each - // replica has a unique originID, it causes problems in ReplicationManager - // when processing over-replicated containers. 
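/*
 * Plain-JDK equivalent of the getUUID() helper above: derive a deterministic
 * origin node ID from the container ID so every replica of a container shares
 * one origin. The test goes through LongCodec, so the exact bytes (and thus
 * the UUID value) may differ; only the "same input, same UUID" property
 * matters here.
 */
import java.nio.ByteBuffer;
import java.util.UUID;

final class DeterministicOriginId {
  static UUID fromContainerId(long containerId) {
    byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(containerId).array();
    return UUID.nameUUIDFromBytes(bytes);  // stable for a given container ID
  }
}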
- final UUID originNodeId = getUUID(container.getContainerID()); - final ContainerReplica replica = getReplicas(container.containerID(), - replicaState, usedBytes, numOfKeys, 1000L, originNodeId, dn); - containerStateManager - .updateContainerReplica(container.containerID(), replica); - return replica; - } - - private void assertReplicaScheduled(int delta) { - final int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentReplicateCommandCount + delta, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - assertEquals(currentReplicateCommandCount + delta, - replicationManager.getMetrics().getReplicationCmdsSentTotal()); - } - - private void assertDeleteScheduled(int delta) { - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - - replicationManager.processAll(); - eventQueue.processAll(1000); - assertEquals(currentDeleteCommandCount + delta, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.deleteContainerCommand)); - assertEquals(currentDeleteCommandCount + delta, - replicationManager.getMetrics().getDeletionCmdsSentTotal()); - } - - private void assertUnderReplicatedCount(int count) { - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(count, report.getStat( - ReplicationManagerReport.HealthState.UNDER_REPLICATED)); - } - - private void assertMissingCount(int count) { - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(count, report.getStat(ReplicationManagerReport.HealthState.MISSING)); - } - - private void assertOverReplicatedCount(int count) { - ReplicationManagerReport report = replicationManager.getContainerReport(); - assertEquals(count, report.getStat(ReplicationManagerReport.HealthState.OVER_REPLICATED)); - } - - private static class DatanodeCommandHandler implements - EventHandler { - - private AtomicInteger invocation = new AtomicInteger(0); - private Map commandInvocation = - new HashMap<>(); - private List commands = new ArrayList<>(); - - @Override - public void onMessage(final CommandForDatanode command, - final EventPublisher publisher) { - final SCMCommandProto.Type type = command.getCommand().getType(); - commandInvocation.computeIfAbsent(type, k -> new AtomicInteger(0)); - commandInvocation.get(type).incrementAndGet(); - invocation.incrementAndGet(); - commands.add(command); - } - - private int getInvocation() { - return invocation.get(); - } - - private int getInvocationCount(SCMCommandProto.Type type) { - return commandInvocation.containsKey(type) ? - commandInvocation.get(type).get() : 0; - } - - private List getReceivedCommands() { - return commands; - } - - /** - * Returns true if the command handler has received the given - * command type for the provided datanode. 
- * - * @param type Command Type - * @param datanode DatanodeDetails - * @return True if command was received, false otherwise - */ - private boolean received(final SCMCommandProto.Type type, - final DatanodeDetails datanode) { - return commands.stream().anyMatch(dc -> - dc.getCommand().getType().equals(type) && - dc.getDatanodeId().equals(datanode.getUuid())); - } - - private void clearState() { - commands.clear(); - invocation.set(0); - commandInvocation.clear(); - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index 568eba57154..d3353944510 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -115,7 +115,6 @@ public class TestReplicationManager { private OzoneConfiguration configuration; private ReplicationManager replicationManager; - private LegacyReplicationManager legacyReplicationManager; private ContainerManager containerManager; private PlacementPolicy ratisPlacementPolicy; private PlacementPolicy ecPlacementPolicy; @@ -156,7 +155,6 @@ public void setup() throws IOException { return null; }).when(nodeManager).addDatanodeCommand(any(), any()); - legacyReplicationManager = mock(LegacyReplicationManager.class); clock = new TestClock(Instant.now(), ZoneId.systemDefault()); containerReplicaPendingOps = new ContainerReplicaPendingOps(clock); @@ -209,7 +207,6 @@ private ReplicationManager createReplicationManager() throws IOException { scmContext, nodeManager, clock, - legacyReplicationManager, containerReplicaPendingOps) { @Override protected void startSubServices() { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerMetrics.java index 31e2384d90d..53e76714624 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerMetrics.java @@ -27,7 +27,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.any; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.apache.ozone.test.MetricsAsserts.getLongGauge; import static org.apache.ozone.test.MetricsAsserts.getMetrics; @@ -57,16 +56,11 @@ public void setup() { report.increment(s); } } - final LegacyReplicationManager lrm = mock( - LegacyReplicationManager.class); - when(lrm.getInflightCount(any(InflightType.class))) - .thenReturn(0); ConfigurationSource conf = new OzoneConfiguration(); ReplicationManager.ReplicationManagerConfiguration rmConf = conf .getObject(ReplicationManager.ReplicationManagerConfiguration.class); ReplicationManager replicationManager = mock(ReplicationManager.class); when(replicationManager.getConfig()).thenReturn(rmConf); - when(replicationManager.getLegacyReplicationManager()).thenReturn(lrm); when(replicationManager.getContainerReport()).thenReturn(report); when(replicationManager.getContainerReplicaPendingOps()) 
.thenReturn(mock(ContainerReplicaPendingOps.class)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java index 437fdc1c06f..d5401185990 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java @@ -105,7 +105,6 @@ public class TestReplicationManagerScenarios { private OzoneConfiguration configuration; private ReplicationManager replicationManager; - private LegacyReplicationManager legacyReplicationManager; private ContainerManager containerManager; private PlacementPolicy ratisPlacementPolicy; private PlacementPolicy ecPlacementPolicy; @@ -184,7 +183,6 @@ public void setup() throws IOException, NodeNotFoundException { return null; }).when(nodeManager).addDatanodeCommand(any(), any()); - legacyReplicationManager = mock(LegacyReplicationManager.class); clock = new TestClock(Instant.now(), ZoneId.systemDefault()); containerReplicaPendingOps = new ContainerReplicaPendingOps(clock); @@ -232,7 +230,6 @@ private ReplicationManager createReplicationManager() throws IOException { scmContext, nodeManager, clock, - legacyReplicationManager, containerReplicaPendingOps) { @Override protected void startSubServices() { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java index a5a2054a8ae..049f38480d8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.RemoveSCMRequest; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; import org.apache.ratis.grpc.GrpcTlsConfig; +import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; import org.junit.jupiter.api.BeforeEach; @@ -31,6 +32,7 @@ import java.io.IOException; import java.lang.reflect.Proxy; import java.util.List; +import java.util.UUID; import java.util.concurrent.ExecutionException; import static org.assertj.core.api.Assertions.assertThat; @@ -111,6 +113,11 @@ public SCMStateMachine getSCMStateMachine() { public GrpcTlsConfig getGrpcTlsConfig() { return null; } + + @Override + public RaftPeerId getLeaderId() { + return RaftPeerId.valueOf(UUID.randomUUID().toString()); + } }; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisServerImpl.java new file mode 100644 index 00000000000..6919ce41ed1 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisServerImpl.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.RaftServer; +import org.junit.jupiter.api.Test; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; + +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +/** + * Test for SCM Ratis Server Implementation. + */ +public class TestSCMRatisServerImpl { + + @Test + public void testGetLeaderId() throws Exception { + + try ( + MockedConstruction mockedSecurityConfigConstruction = mockConstruction(SecurityConfig.class); + MockedStatic staticMockedRaftServer = mockStatic(RaftServer.class); + MockedStatic staticMockedRatisUtil = mockStatic(RatisUtil.class); + ) { + // given + ConfigurationSource conf = mock(ConfigurationSource.class); + StorageContainerManager scm = mock(StorageContainerManager.class); + String clusterId = "CID-" + UUID.randomUUID(); + when(scm.getClusterId()).thenReturn(clusterId); + SCMHADBTransactionBuffer dbTransactionBuffer = mock(SCMHADBTransactionBuffer.class); + + RaftServer.Builder raftServerBuilder = mock(RaftServer.Builder.class); + when(raftServerBuilder.setServerId(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setProperties(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setStateMachineRegistry(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setOption(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setGroup(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setParameters(any())).thenReturn(raftServerBuilder); + + RaftServer raftServer = mock(RaftServer.class); + + RaftServer.Division division = mock(RaftServer.Division.class); + when(raftServer.getDivision(any())).thenReturn(division); + + SCMStateMachine scmStateMachine = mock(SCMStateMachine.class); + when(division.getStateMachine()).thenReturn(scmStateMachine); + + when(raftServerBuilder.build()).thenReturn(raftServer); + + staticMockedRaftServer.when(RaftServer::newBuilder).thenReturn(raftServerBuilder); + + RaftProperties raftProperties = mock(RaftProperties.class); + staticMockedRatisUtil.when(() -> RatisUtil.newRaftProperties(conf)).thenReturn(raftProperties); + + SecurityConfig sc = new SecurityConfig(conf); + when(sc.isSecurityEnabled()).thenReturn(false); + + SCMRatisServerImpl scmRatisServer = spy(new SCMRatisServerImpl(conf, scm, dbTransactionBuffer)); + doReturn(RaftPeer.newBuilder().setId(RaftPeerId.valueOf("peer1")).build()).when(scmRatisServer).getLeader(); + + // when + RaftPeerId 
leaderId = scmRatisServer.getLeaderId(); + + // then + assertEquals(RaftPeerId.valueOf("peer1"), leaderId); + + // but when + doReturn(null).when(scmRatisServer).getLeader(); + leaderId = scmRatisServer.getLeaderId(); + + // then + assertNull(leaderId); + } + } + +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java index 92509d22685..4e69f46b6e9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java @@ -49,7 +49,7 @@ public class TestStatefulServiceStateManagerImpl { void setup(@TempDir File testDir) throws IOException { conf = SCMTestUtils.getConf(testDir); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); statefulServiceConfig = SCMDBDefinition.STATEFUL_SERVICE_CONFIG.getTable(dbStore); scmhaManager = SCMHAManagerStub.getInstance(true, dbStore); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java index fb80fbbee78..f09bb43d4cf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java @@ -30,6 +30,10 @@ * Codec to serialize / deserialize PipelineID. 
*/ public class OldPipelineIDCodecForTesting implements Codec { + @Override + public Class getTypeClass() { + return PipelineID.class; + } @Override public byte[] toPersistedFormat(PipelineID object) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java index 67593dc7778..3a8fc9a9632 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java @@ -45,6 +45,11 @@ private OldX509CertificateCodecForTesting() { // singleton } + @Override + public Class getTypeClass() { + return X509Certificate.class; + } + @Override public byte[] toPersistedFormat(X509Certificate object) throws IOException { try { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index f3a303cad73..0862c46e838 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -100,8 +100,7 @@ public class TestContainerPlacement { public void setUp() throws Exception { conf = getConf(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); sequenceIdGen = new SequenceIdGenerator( conf, scmhaManager, SCMDBDefinition.SEQUENCE_ID.getTable(dbStore)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 5c04ad63210..eebe523f4fd 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.replication.LegacyRatisContainerReplicaCount; import org.apache.hadoop.hdds.scm.container.replication.RatisContainerReplicaCount; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.container.SimpleMockNodeManager; @@ -223,85 +222,6 @@ public void testDecommissionNodeWaitsForContainersToReplicate() nodeManager.getNodeStatus(dn1).getOperationalState()); } - /** - * Situation: A QUASI_CLOSED container has an UNHEALTHY replica with the - * greatest BCSID, and three QUASI_CLOSED replicas with a smaller BCSID. The - * UNHEALTHY container is on a decommissioning node, and there are no other - * copies of this replica, that is, replicas with the same Origin ID as - * this replica. - * - * Expectation: Decommissioning should not complete until the UNHEALTHY - * replica has been replicated to another node. 
- * - * Note: This test currently uses the LegacyReplicationManager, as the new - * one doesn't support this behaviour yet. - * @throws NodeNotFoundException - * @throws ContainerNotFoundException - */ - @Test - public void testDecommissionWaitsForUnhealthyReplicaToReplicate() - throws NodeNotFoundException, ContainerNotFoundException { - conf.setBoolean("hdds.scm.replication.enable.legacy", true); - - DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails(); - nodeManager.register(dn1, - new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING, - HddsProtos.NodeState.HEALTHY)); - - // create 3 QUASI_CLOSED replicas with containerID 1 and same origin ID - ContainerID containerID = ContainerID.valueOf(1); - Set replicas = - ReplicationTestUtil.createReplicasWithSameOrigin(containerID, - State.QUASI_CLOSED, 0, 0, 0); - - // the container's sequence id is greater than the healthy replicas' - ContainerInfo container = ReplicationTestUtil.createContainerInfo( - RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.THREE), containerID.getId(), - HddsProtos.LifeCycleState.QUASI_CLOSED, - replicas.iterator().next().getSequenceId() + 1); - // UNHEALTHY replica is on a unique origin and has same sequence id as - // the container - ContainerReplica unhealthy = - ReplicationTestUtil.createContainerReplica(containerID, 0, - dn1.getPersistedOpState(), State.UNHEALTHY, - container.getNumberOfKeys(), container.getUsedBytes(), dn1, - dn1.getUuid(), container.getSequenceId()); - replicas.add(unhealthy); - nodeManager.setContainers(dn1, ImmutableSet.of(containerID)); - - when(repManager.getContainerReplicaCount(eq(containerID))) - .thenReturn(new LegacyRatisContainerReplicaCount(container, replicas, - 0, 0, 3, 2)); - - // start monitoring dn1 - monitor.startMonitoring(dn1); - monitor.run(); - assertEquals(1, monitor.getTrackedNodeCount()); - assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, - nodeManager.getNodeStatus(dn1).getOperationalState()); - - // Running the monitor again causes it to remain DECOMMISSIONING - // as nothing has changed. - monitor.run(); - assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, - nodeManager.getNodeStatus(dn1).getOperationalState()); - - // add a copy of the UNHEALTHY replica on a new node, dn1 should get - // decommissioned now - ContainerReplica copyOfUnhealthyOnNewNode = unhealthy.toBuilder() - .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) - .build(); - replicas.add(copyOfUnhealthyOnNewNode); - when(repManager.getContainerReplicaCount(eq(containerID))) - .thenReturn(new LegacyRatisContainerReplicaCount(container, replicas, - 0, 0, 3, 2)); - monitor.run(); - assertEquals(0, monitor.getTrackedNodeCount()); - assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED, - nodeManager.getNodeStatus(dn1).getOperationalState()); - } - /** * Situation: A QUASI_CLOSED container has an UNHEALTHY replica with the * greatest BCSID, and three QUASI_CLOSED replicas with a smaller BCSID. The @@ -440,72 +360,6 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN nodeManager.getNodeStatus(dn1).getOperationalState()); } - /** - * Consider a QUASI_CLOSED container with only UNHEALTHY replicas. If one - * of its nodes is decommissioned, the decommissioning should succeed. 
- */ - @Test - public void testQuasiClosedContainerWithAllUnhealthyReplicas() - throws NodeNotFoundException, ContainerNotFoundException { - conf.setBoolean("hdds.scm.replication.enable.legacy", true); - - DatanodeDetails decommissioningNode = - MockDatanodeDetails.randomDatanodeDetails(); - nodeManager.register(decommissioningNode, - new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING, - HddsProtos.NodeState.HEALTHY)); - ContainerInfo container = ReplicationTestUtil.createContainer( - HddsProtos.LifeCycleState.QUASI_CLOSED, - RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.THREE)); - Set replicas = - ReplicationTestUtil.createReplicas(container.containerID(), - State.UNHEALTHY, 0, 0); - - ContainerReplica decommissioningReplica = - ReplicationTestUtil.createContainerReplica(container.containerID(), 0, - DECOMMISSIONING, State.UNHEALTHY, container.getNumberOfKeys(), - container.getUsedBytes(), decommissioningNode, - decommissioningNode.getUuid()); - replicas.add(decommissioningReplica); - nodeManager.setContainers(decommissioningNode, - ImmutableSet.of(container.containerID())); - - when(repManager.getContainerReplicaCount( - eq(container.containerID()))) - .thenReturn(new LegacyRatisContainerReplicaCount(container, replicas, - Collections.emptyList(), 2, true)); - - // start monitoring dn1 - monitor.startMonitoring(decommissioningNode); - monitor.run(); - assertEquals(1, monitor.getTrackedNodeCount()); - assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, - nodeManager.getNodeStatus(decommissioningNode).getOperationalState()); - - // Running the monitor again causes it to remain DECOMMISSIONING - // as nothing has changed. - monitor.run(); - assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, - nodeManager.getNodeStatus(decommissioningNode).getOperationalState()); - - // add a copy of the UNHEALTHY replica on a new node, decommissioningNode - // should get decommissioned now - ContainerReplica copyOfUnhealthyOnNewNode = - decommissioningReplica.toBuilder() - .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) - .build(); - replicas.add(copyOfUnhealthyOnNewNode); - when(repManager.getContainerReplicaCount( - eq(container.containerID()))) - .thenReturn(new LegacyRatisContainerReplicaCount(container, replicas, - Collections.emptyList(), 3, true)); - monitor.run(); - assertEquals(0, monitor.getTrackedNodeCount()); - assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED, - nodeManager.getNodeStatus(decommissioningNode).getOperationalState()); - } - @Test public void testDecommissionNotBlockedByDeletingContainers() throws NodeNotFoundException, ContainerNotFoundException { @@ -840,7 +694,6 @@ public void testCancelledNodesMovedToInService() @Test public void testContainersReplicatedOnDecomDnAPI() throws NodeNotFoundException, ContainerNotFoundException { - conf.setBoolean("hdds.scm.replication.enable.legacy", false); DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails(); nodeManager.register(dn1, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 4511ffea5d2..12cb37b8409 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -237,21 +237,15 @@ public void 
testNodesCanBeDecommissionedAndRecommissionedMixedPorts() // same IP so we have 3 registered from the same host and 2 distinct ports. DatanodeDetails sourceDN = dns.get(9); int ratisPort = sourceDN - .getPort(DatanodeDetails.Port.Name.RATIS).getValue(); + .getRatisPort().getValue(); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName(sourceDN.getHostName()) .setIpAddress(sourceDN.getIpAddress()) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, - sourceDN.getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue() + 1)) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, - ratisPort + 1)) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, - sourceDN.getPort(DatanodeDetails.Port.Name.REST).getValue() + 1)) + .addPort(DatanodeDetails.newStandalonePort(sourceDN.getStandalonePort() + .getValue() + 1)) + .addPort(DatanodeDetails.newRatisPort(ratisPort + 1)) + .addPort(DatanodeDetails.newRestPort(sourceDN.getRestPort().getValue() + 1)) .setNetworkLocation(sourceDN.getNetworkLocation()); DatanodeDetails extraDN = builder.build(); dns.add(extraDN); @@ -440,6 +434,10 @@ public void testInsufficientNodeDecommissionThrowsExceptionForRatis() throws error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot decommission as a minimum of %d IN-SERVICE HEALTHY nodes are required", 3); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, @@ -489,6 +487,10 @@ public void testInsufficientNodeDecommissionThrowsExceptionForEc() throws error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot decommission as a minimum of %d IN-SERVICE HEALTHY nodes are required", 5); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); @@ -537,6 +539,10 @@ public void testInsufficientNodeDecommissionThrowsExceptionRatisAndEc() throws error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot decommission as a minimum of %d IN-SERVICE HEALTHY nodes are required", 5); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); error = 
decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); @@ -637,6 +643,7 @@ public void testInsufficientNodeDecommissionChecksForNNF() throws error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(), dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), false); assertFalse(error.get(0).getHostname().contains("AllHosts")); + assertTrue(error.get(0).getError().contains("The host was not found in SCM")); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, @@ -673,6 +680,11 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForRatis() throws error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), 100, false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot enter maintenance mode as a minimum of %d IN-SERVICE HEALTHY nodes are required", + 2); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, @@ -768,6 +780,11 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForEc() throws error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), 100, false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot enter maintenance mode as a minimum of %d IN-SERVICE HEALTHY nodes are required", + 4); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, @@ -869,6 +886,11 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForRatisAndEc() throws // it should not be allowed as for EC, maintenance.remaining.redundancy is 2 => 3+2=5 DNs are required error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress()), 100, false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot enter maintenance mode as a minimum of %d IN-SERVICE HEALTHY nodes are required", + 5); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); @@ -1053,12 +1075,9 @@ private List generateDatanodes() { builder.setUuid(UUID.randomUUID()) .setHostName(multiDn.getHostName()) .setIpAddress(multiDn.getIpAddress()) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 3456)) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 4567)) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 5678)) + 
.addPort(DatanodeDetails.newStandalonePort(3456)) + .addPort(DatanodeDetails.newRatisPort(4567)) + .addPort(DatanodeDetails.newRestPort(5678)) .setNetworkLocation(multiDn.getNetworkLocation()); DatanodeDetails dn = builder.build(); @@ -1072,16 +1091,9 @@ private List generateDatanodes() { builder.setUuid(UUID.randomUUID()) .setHostName(duplicatePorts.getHostName()) .setIpAddress(duplicatePorts.getIpAddress()) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, - duplicatePorts.getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue())) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, - duplicatePorts.getPort(DatanodeDetails.Port.Name.RATIS).getValue())) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, - duplicatePorts.getPort(DatanodeDetails.Port.Name.REST).getValue())) + .addPort(DatanodeDetails.newStandalonePort(duplicatePorts.getStandalonePort().getValue())) + .addPort(DatanodeDetails.newRatisPort(duplicatePorts.getRatisPort().getValue())) + .addPort(DatanodeDetails.newRestPort(duplicatePorts.getRestPort().getValue())) .setNetworkLocation(multiDn.getNetworkLocation()); dn = builder.build(); dns.add(dn); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index cc9133cf684..6d11cb5fe58 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -123,6 +123,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.ArgumentCaptor; @@ -850,15 +851,12 @@ void testScmHandleJvmPause() throws Exception { } } - @Test - public void testProcessLayoutVersion() throws IOException { - // TODO: Refactor this class to use org.junit.jupiter so test - // parameterization can be used. - for (FinalizationCheckpoint checkpoint: FinalizationCheckpoint.values()) { - LOG.info("Testing with SCM finalization checkpoint {}", checkpoint); - testProcessLayoutVersionLowerMlv(checkpoint); - testProcessLayoutVersionReportHigherMlv(checkpoint); - } + @ParameterizedTest + @EnumSource(FinalizationCheckpoint.class) + public void testProcessLayoutVersion(FinalizationCheckpoint checkpoint) throws IOException { + LOG.info("Testing with SCM finalization checkpoint {}", checkpoint); + testProcessLayoutVersionLowerMlv(checkpoint); + testProcessLayoutVersionReportHigherMlv(checkpoint); } // Currently invoked by testProcessLayoutVersion. 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java index 385e1c65316..9908210e074 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java @@ -66,8 +66,7 @@ public class TestPipelineDatanodesIntersection { public void initialize() throws IOException { conf = SCMTestUtils.getConf(testDir); end = false; - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); } @AfterEach diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index e9407d6a941..1dfbfd32785 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -136,7 +136,7 @@ void init(@TempDir File testDir, @TempDir File dbDir) throws Exception { // placement policy (Rack Scatter), so just use the random one. conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_EC_IMPL_KEY, SCMContainerPlacementRandom.class.getName()); - dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); nodeManager = new MockNodeManager(true, 20); maxPipelineCount = nodeManager.getNodeCount( HddsProtos.NodeOperationalState.IN_SERVICE, @@ -358,7 +358,8 @@ public void testClosePipelineShouldFailOnFollower() throws Exception { public void testPipelineReport() throws Exception { try (PipelineManagerImpl pipelineManager = createPipelineManager(true)) { SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(conf, new ArrayList<>(), null, pipelineManager, + new SCMSafeModeManager(conf, new ArrayList<>(), + mock(ContainerManager.class), pipelineManager, new EventQueue(), serviceManager, scmContext); Pipeline pipeline = pipelineManager .createPipeline(RatisReplicationConfig @@ -469,7 +470,7 @@ public void testPipelineOpenOnlyWhenLeaderReported() throws Exception { SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(new OzoneConfiguration(), new ArrayList<>(), - null, pipelineManager, new EventQueue(), + mock(ContainerManager.class), pipelineManager, new EventQueue(), serviceManager, scmContext); PipelineReportHandler pipelineReportHandler = new PipelineReportHandler(scmSafeModeManager, pipelineManager, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java index 96f62432b31..82fcc01d7ee 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java @@ -135,8 +135,7 @@ private void setupRacks(int datanodeCount, int nodesPerRack, .thenReturn(dn); } - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = 
DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index 0f9ec84f033..722bb260859 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -112,8 +112,7 @@ public void init() throws Exception { conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 10, StorageUnit.MB); nodeManager.setNumPipelinePerDatanode(PIPELINE_LOAD_LIMIT); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore)) @@ -398,9 +397,9 @@ private DatanodeDetails overwriteLocationInNode( .setUuid(datanode.getUuid()) .setHostName(datanode.getHostName()) .setIpAddress(datanode.getIpAddress()) - .addPort(datanode.getPort(DatanodeDetails.Port.Name.STANDALONE)) - .addPort(datanode.getPort(DatanodeDetails.Port.Name.RATIS)) - .addPort(datanode.getPort(DatanodeDetails.Port.Name.REST)) + .addPort(datanode.getStandalonePort()) + .addPort(datanode.getRatisPort()) + .addPort(datanode.getRestPort()) .setNetworkLocation(node.getNetworkLocation()).build(); return result; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java index 9feb9e1f0a9..4a0baa2daca 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java @@ -68,8 +68,7 @@ public class TestPipelineStateManagerImpl { @BeforeEach public void init() throws Exception { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); NodeManager nodeManager = new MockNodeManager(true, 10); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java index 5350c0da86e..94c0d45276c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java @@ -93,8 +93,7 @@ public void init(int maxPipelinePerNode, OzoneConfiguration conf) public void init(int maxPipelinePerNode, OzoneConfiguration conf, File dir) throws Exception { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); nodeManager = new 
MockNodeManager(true, 10); nodeManager.setNumPipelinePerDatanode(maxPipelinePerNode); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java index b69ebedb04d..7fb31d2c768 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java @@ -60,8 +60,7 @@ public class TestSimplePipelineProvider { public void init() throws Exception { nodeManager = new MockNodeManager(true, 10); final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore)) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java index 4f86450d03e..78aab4843cf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java @@ -128,8 +128,7 @@ void setup(@TempDir File testDir) throws IOException { containers = new HashMap<>(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java index 98f16394902..13eb4be724c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; @@ -50,6 +51,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * This class tests HealthyPipelineSafeMode rule. 
@@ -69,6 +72,8 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() OzoneConfiguration config = new OzoneConfiguration(); MockNodeManager nodeManager = new MockNodeManager(true, 0); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( @@ -94,7 +99,7 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, eventQueue, + config, containers, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = @@ -121,6 +126,8 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { // stale and last one is dead, and this repeats. So for a 12 node, 9 // healthy, 2 stale and one dead. MockNodeManager nodeManager = new MockNodeManager(true, 12); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( @@ -172,7 +179,7 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, eventQueue, + config, containers, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = @@ -215,6 +222,8 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() // stale and last one is dead, and this repeats. So for a 12 node, 9 // healthy, 2 stale and one dead. 
MockNodeManager nodeManager = new MockNodeManager(true, 12); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( @@ -266,7 +275,7 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, eventQueue, + config, containers, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index e070a2b6036..76bafa8b1fb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; @@ -58,6 +59,8 @@ import org.slf4j.LoggerFactory; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * This class tests OneReplicaPipelineSafeModeRule. 
@@ -86,7 +89,8 @@ private void setup(int nodes, int pipelineFactorThreeCount, List containers = new ArrayList<>(); containers.addAll(HddsTestUtils.getContainerInfo(1)); mockNodeManager = new MockNodeManager(true, nodes); - + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); eventQueue = new EventQueue(); serviceManager = new SCMServiceManager(); scmContext = SCMContext.emptyContext(); @@ -116,7 +120,7 @@ private void setup(int nodes, int pipelineFactorThreeCount, HddsProtos.ReplicationFactor.ONE); SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(ozoneConfiguration, containers, null, + new SCMSafeModeManager(ozoneConfiguration, containers, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); rule = scmSafeModeManager.getOneReplicaPipelineSafeModeRule(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 319caabe40a..fc8ec9c1912 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; import java.time.Clock; +import java.time.ZoneId; import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; @@ -28,6 +29,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -39,7 +41,10 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.MockNodeManager; +import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; import org.apache.hadoop.hdds.scm.ha.SCMContext; @@ -52,6 +57,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerImpl; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; @@ -63,14 +69,19 @@ import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static 
org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** Test class for SCMSafeModeManager. */ @@ -96,8 +107,7 @@ public void setUp() throws IOException { config = new OzoneConfiguration(); config.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, - tempDir.getAbsolutePath().toString()); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getAbsolutePath()); scmMetadataStore = new SCMMetadataStoreImpl(config); } @@ -108,19 +118,10 @@ public void destroyDbStore() throws Exception { } } - @Test - public void testSafeModeState() throws Exception { - // Test 1: test for 0 containers - testSafeMode(0); - - // Test 2: test for 20 containers - testSafeMode(20); - } - - @Test - public void testSafeModeStateWithNullContainers() { - new SCMSafeModeManager(config, Collections.emptyList(), - null, null, queue, serviceManager, scmContext); + @ParameterizedTest + @ValueSource(ints = {0, 20}) + public void testSafeModeState(int numContainers) throws Exception { + testSafeMode(numContainers); } private void testSafeMode(int numContainers) throws Exception { @@ -132,14 +133,18 @@ private void testSafeMode(int numContainers) throws Exception { container.setState(HddsProtos.LifeCycleState.CLOSED); container.setNumberOfKeys(10); } + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, null, queue, + config, containers, containerManager, null, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); validateRuleStatus("DatanodeSafeModeRule", "registered datanodes 0"); - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(containers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, nodeRegistrationContainerReport); long cutOff = (long) Math.ceil(numContainers * config.getDouble( HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, @@ -167,8 +172,10 @@ public void testSafeModeExitRule() throws Exception { container.setState(HddsProtos.LifeCycleState.CLOSED); container.setNumberOfKeys(10); } + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, null, queue, + config, containers, containerManager, null, queue, serviceManager, scmContext); long cutOff = (long) Math.ceil(numContainers * config.getDouble( @@ -180,7 +187,7 @@ public void testSafeModeExitRule() throws Exception { assertTrue(scmSafeModeManager.getInSafeMode()); validateRuleStatus("ContainerSafeModeRule", - "% of containers with at least one reported"); + "0.00% of [Ratis] Containers(0 / 100) with at least one reported"); testContainerThreshold(containers.subList(0, 25), 0.25); assertEquals(25, scmSafeModeManager.getSafeModeMetrics() .getCurrentContainersWithOneReplicaReportedCount().value()); @@ -215,36 +222,6 @@ private OzoneConfiguration createConf(double healthyPercent, return conf; } - @Test - 
public void testSafeModeExitRuleWithPipelineAvailabilityCheck1() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0.90, 1); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck2() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0.10, 0.9); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck3() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0, 0.9); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck4() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck5() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0.5); - } - @ParameterizedTest @CsvSource(value = {"100,0.9,false", "0.9,200,false", "0.9,0.1,true"}) public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, @@ -264,11 +241,26 @@ public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, scmContext, serviceManager, Clock.system(ZoneOffset.UTC)); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, - () -> new SCMSafeModeManager(conf, containers, null, pipelineManager, queue, serviceManager, scmContext)); + () -> new SCMSafeModeManager(conf, containers, containerManager, + pipelineManager, queue, serviceManager, scmContext)); assertThat(exception).hasMessageEndingWith("value should be >= 0.0 and <= 1.0"); } + private static Stream testCaseForSafeModeExitRuleWithPipelineAvailabilityCheck() { + return Stream.of( + Arguments.of(100, 30, 8, 0.90, 1), + Arguments.of(100, 90, 22, 0.10, 0.9), + Arguments.of(100, 30, 8, 0, 0.9), + Arguments.of(100, 90, 22, 0, 0), + Arguments.of(100, 90, 22, 0, 0.5) + ); + } + + @ParameterizedTest + @MethodSource("testCaseForSafeModeExitRuleWithPipelineAvailabilityCheck") public void testSafeModeExitRuleWithPipelineAvailabilityCheck( int containerCount, int nodeCount, int pipelineCount, double healthyPipelinePercent, double oneReplicaPercent) @@ -315,8 +307,11 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( container.setState(HddsProtos.LifeCycleState.CLOSED); } + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); + scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, pipelineManager, queue, serviceManager, + conf, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -449,18 +444,19 @@ public void testDisableSafeMode() { OzoneConfiguration conf = new OzoneConfiguration(config); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); PipelineManager pipelineManager = mock(PipelineManager.class); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, pipelineManager, queue, serviceManager, + conf, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); assertFalse(scmSafeModeManager.getInSafeMode()); } - @Test - public void testSafeModeDataNodeExitRule() throws Exception { + @ParameterizedTest + 
@ValueSource(ints = {0, 3, 5}) + public void testSafeModeDataNodeExitRule(int numberOfDns) throws Exception { containers = new ArrayList<>(); - testSafeModeDataNodes(0); - testSafeModeDataNodes(3); - testSafeModeDataNodes(5); + testSafeModeDataNodes(numberOfDns); } /** @@ -489,8 +485,11 @@ public void testContainerSafeModeRule() throws Exception { container.setNumberOfKeys(0); } + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); + scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, null, queue, serviceManager, scmContext); + config, containers, containerManager, null, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -510,11 +509,86 @@ public void testContainerSafeModeRule() throws Exception { 100, 1000 * 5); } + // We simulate common EC types: EC-2-2-1024K, EC-3-2-1024K, EC-6-3-1024K. + static Stream processECDataParityCombination() { + Stream args = Stream.of(arguments(2, 2), + arguments(3, 2), arguments(6, 3)); + return args; + } + + @ParameterizedTest + @MethodSource("processECDataParityCombination") + public void testContainerSafeModeRuleEC(int data, int parity) throws Exception { + containers = new ArrayList<>(); + + // We generate 100 EC Containers. + containers.addAll(HddsTestUtils.getECContainerInfo(25 * 4, data, parity)); + + // Prepare the data for the container. + // We have prepared 25 containers in the CLOSED state and 75 containers in the OPEN state. + // Out of the 25 containers, only 20 containers have a NumberOfKeys greater than 0. + for (ContainerInfo container : containers.subList(0, 25)) { + container.setState(HddsProtos.LifeCycleState.CLOSED); + container.setNumberOfKeys(10); + } + + for (ContainerInfo container : containers.subList(25, 100)) { + container.setState(HddsProtos.LifeCycleState.OPEN); + container.setNumberOfKeys(10); + } + + // Set the last 5 closed containers to be empty + for (ContainerInfo container : containers.subList(20, 25)) { + container.setNumberOfKeys(0); + } + + for (ContainerInfo container : containers) { + scmMetadataStore.getContainerTable().put(container.containerID(), container); + } + + // Declare SCMSafeModeManager and confirm entry into Safe Mode. + EventQueue eventQueue = new EventQueue(); + MockNodeManager nodeManager = new MockNodeManager(true, 0); + PipelineManager pipelineManager = PipelineManagerImpl.newPipelineManager( + config, + SCMHAManagerStub.getInstance(true), + nodeManager, + scmMetadataStore.getPipelineTable(), + eventQueue, + scmContext, + serviceManager, + Clock.system(ZoneOffset.UTC)); + + ContainerManager containerManager = new ContainerManagerImpl(config, + SCMHAManagerStub.getInstance(true), null, pipelineManager, + scmMetadataStore.getContainerTable(), + new ContainerReplicaPendingOps(Clock.system(ZoneId.systemDefault()))); + + scmSafeModeManager = new SCMSafeModeManager( + config, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); + assertTrue(scmSafeModeManager.getInSafeMode()); + + // Only 20 containers are involved in the calculation, + // so when 10 containers complete registration, our threshold is 50%. + testECContainerThreshold(containers.subList(0, 10), 0.5, data); + assertTrue(scmSafeModeManager.getInSafeMode()); + + // When the registration of the remaining containers is completed, + // the threshold will reach 100%. 
+ testECContainerThreshold(containers.subList(10, 20), 1.0, data); + + ContainerSafeModeRule containerSafeModeRule = + scmSafeModeManager.getContainerSafeModeRule(); + assertTrue(containerSafeModeRule.validate()); + } + private void testSafeModeDataNodes(int numOfDns) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(config); conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, numOfDns); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, null, queue, + conf, containers, containerManager, null, queue, serviceManager, scmContext); // Assert SCM is in Safe mode. @@ -522,8 +596,10 @@ private void testSafeModeDataNodes(int numOfDns) throws Exception { // Register all DataNodes except last one and assert SCM is in safe mode. for (int i = 0; i < numOfDns - 1; i++) { - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(containers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, nodeRegistrationContainerReport); assertTrue(scmSafeModeManager.getInSafeMode()); assertEquals(1, scmSafeModeManager.getCurrentContainerThreshold()); } @@ -543,14 +619,52 @@ private void testSafeModeDataNodes(int numOfDns) throws Exception { private void testContainerThreshold(List dnContainers, double expectedThreshold) throws Exception { + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(dnContainers); queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(dnContainers)); + nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, + nodeRegistrationContainerReport); GenericTestUtils.waitFor(() -> { double threshold = scmSafeModeManager.getCurrentContainerThreshold(); return threshold == expectedThreshold; }, 100, 2000 * 9); } + /** + * Test ECContainer reaching SafeMode threshold. + * + * @param dnContainers + * The list of containers that need to reach the threshold. + * @param expectedThreshold + * The expected threshold. + * @param dataBlockNum + * The number of data blocks. For EC-3-2-1024K, + * we need 3 registration requests to ensure the EC Container is confirmed. + * For EC-6-3-1024K, we need 6 registration requests to ensure the EC Container is confirmed. + * @throws Exception The thrown exception message. + */ + private void testECContainerThreshold(List dnContainers, + double expectedThreshold, int dataBlockNum) throws Exception { + + // Step1. We need to ensure the number of confirmed EC data blocks + // based on the quantity of dataBlockNum. + for (int i = 0; i < dataBlockNum; i++) { + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(dnContainers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, + nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, + nodeRegistrationContainerReport); + } + + // Step2. Wait for the threshold to be reached. 
+ GenericTestUtils.waitFor(() -> { + double threshold = scmSafeModeManager.getCurrentECContainerThreshold(); + return threshold == expectedThreshold; + }, 100, 2000 * 9); + } + @Test public void testSafeModePipelineExitRule() throws Exception { containers = new ArrayList<>(); @@ -584,13 +698,18 @@ public void testSafeModePipelineExitRule() throws Exception { pipeline = pipelineManager.getPipeline(pipeline.getId()); MockRatisPipelineProvider.markPipelineHealthy(pipeline); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, queue, serviceManager, + config, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(containers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, nodeRegistrationContainerReport); + assertTrue(scmSafeModeManager.getInSafeMode()); firePipelineEvent(pipelineManager, pipeline); @@ -634,8 +753,11 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception { pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); + scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, queue, serviceManager, + config, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); // Assert SCM is in Safe mode. @@ -647,8 +769,10 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception { // Register all DataNodes except last one and assert SCM is in safe mode. for (int i = 0; i < numOfDns - 1; i++) { - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(containers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, nodeRegistrationContainerReport); assertTrue(scmSafeModeManager.getInSafeMode()); assertFalse(scmSafeModeManager.getPreCheckComplete()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeRuleFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeRuleFactory.java new file mode 100644 index 00000000000..837012429be --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeRuleFactory.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.safemode; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.junit.jupiter.api.Test; + +import java.lang.reflect.Field; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class TestSafeModeRuleFactory { + + @Test + public void testIllegalState() { + // If the initialization is already done by different test, we have to reset it. + try { + final Field instance = SafeModeRuleFactory.class.getDeclaredField("instance"); + instance.setAccessible(true); + instance.set(null, null); + } catch (Exception e) { + throw new RuntimeException(); + } + assertThrows(IllegalStateException.class, SafeModeRuleFactory::getInstance); + } + + @Test + public void testLoadedSafeModeRules() { + initializeSafeModeRuleFactory(); + final SafeModeRuleFactory factory = SafeModeRuleFactory.getInstance(); + + // Currently we assert the total count against hardcoded value + // as the rules are hardcoded in SafeModeRuleFactory. + + // This will be fixed once we load rules using annotation. + assertEquals(4, factory.getSafeModeRules().size(), + "The total safemode rules count doesn't match"); + + } + + @Test + public void testLoadedPreCheckRules() { + initializeSafeModeRuleFactory(); + final SafeModeRuleFactory factory = SafeModeRuleFactory.getInstance(); + + // Currently we assert the total count against hardcoded value + // as the rules are hardcoded in SafeModeRuleFactory. + + // This will be fixed once we load rules using annotation. 
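The precheck-rule assertion that follows uses the same hardcoded-count approach. Separately, the reflection-based reset of the private static "instance" field done inline in testIllegalState above is a pattern that could be factored into a small reusable helper; a hedged sketch follows, where only the field name "instance" comes from the test and everything else is hypothetical:

// Sketch only (not part of this patch): a reusable helper for resetting a
// lazily-initialized static singleton field between tests via reflection.
import java.lang.reflect.Field;

final class StaticFieldResetter {
  private StaticFieldResetter() { }

  static void resetStaticField(Class<?> clazz, String fieldName) {
    try {
      Field field = clazz.getDeclaredField(fieldName);
      field.setAccessible(true);
      field.set(null, null); // static field, so the target instance is null
    } catch (ReflectiveOperationException e) {
      throw new IllegalStateException("Could not reset " + clazz + "#" + fieldName, e);
    }
  }
}
// Usage in a test: StaticFieldResetter.resetStaticField(SafeModeRuleFactory.class, "instance");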
+ assertEquals(1, factory.getPreCheckRules().size(), + "The total safemode rules count doesn't match"); + + } + + private void initializeSafeModeRuleFactory() { + final SCMSafeModeManager safeModeManager = mock(SCMSafeModeManager.class); + when(safeModeManager.getSafeModeMetrics()).thenReturn(mock(SafeModeMetrics.class)); + SafeModeRuleFactory.initialize(new OzoneConfiguration(), + SCMContext.emptyContext(), new EventQueue(), safeModeManager, mock( + PipelineManager.class), mock(ContainerManager.class)); + } + +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 7c06b79a2ff..8e21eef930e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -17,12 +17,19 @@ */ package org.apache.hadoop.hdds.scm.server; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ReconfigurationHandler; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.ozone.container.common.SCMTestUtils; @@ -35,9 +42,13 @@ import java.io.File; import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -112,4 +123,47 @@ public void testReadOnlyAdmins() throws IOException { UserGroupInformation.reset(); } } + + /** + * Tests listContainer of scm. 
+ */ + @Test + public void testScmListContainer() throws Exception { + SCMClientProtocolServer scmServer = + new SCMClientProtocolServer(new OzoneConfiguration(), + mockStorageContainerManager(), mock(ReconfigurationHandler.class)); + + assertEquals(10, scmServer.listContainer(1, 10, + null, HddsProtos.ReplicationType.RATIS, null).getContainerInfoList().size()); + // Test call from a legacy client, which uses a different method of listContainer + assertEquals(10, scmServer.listContainer(1, 10, null, + HddsProtos.ReplicationFactor.THREE).getContainerInfoList().size()); + } + + private StorageContainerManager mockStorageContainerManager() { + List infos = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + infos.add(newContainerInfoForTest()); + } + ContainerManagerImpl containerManager = mock(ContainerManagerImpl.class); + when(containerManager.getContainers()).thenReturn(infos); + StorageContainerManager storageContainerManager = mock(StorageContainerManager.class); + when(storageContainerManager.getContainerManager()).thenReturn(containerManager); + + SCMNodeDetails scmNodeDetails = mock(SCMNodeDetails.class); + when(scmNodeDetails.getClientProtocolServerAddress()).thenReturn(new InetSocketAddress("localhost", 9876)); + when(scmNodeDetails.getClientProtocolServerAddressKey()).thenReturn("test"); + when(storageContainerManager.getScmNodeDetails()).thenReturn(scmNodeDetails); + return storageContainerManager; + } + + private ContainerInfo newContainerInfoForTest() { + return new ContainerInfo.Builder() + .setContainerID(1) + .setPipelineID(PipelineID.randomId()) + .setReplicationConfig( + RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE)) + .build(); + } } diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index f720d65bdf5..6ff87083c03 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-test-utils - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Test Utils Apache Ozone HDDS Test Utils jar diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index c9fa668445d..8a770424766 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -29,6 +29,7 @@ import java.time.Instant; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.TimeoutException; import com.google.common.base.Preconditions; @@ -92,7 +93,10 @@ public static Instant getTestStartTime() { * Get the (created) base directory for tests. * * @return the absolute directory + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ + @Deprecated public static File getTestDir() { String prop = System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_DIR); @@ -109,7 +113,10 @@ public static File getTestDir() { * Get an uncreated directory for tests. * * @return the absolute directory for tests. Caller is expected to create it. + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ + @Deprecated public static File getTestDir(String subdir) { return new File(getTestDir(), subdir).getAbsoluteFile(); } @@ -119,7 +126,10 @@ public static File getTestDir(String subdir) { * name. 
This is likely to provide a unique path for tests run in parallel * * @return the absolute directory for tests. Caller is expected to create it. + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ + @Deprecated public static File getRandomizedTestDir() { return new File(getRandomizedTempPath()); } @@ -131,7 +141,10 @@ public static File getRandomizedTestDir() { * * @param subpath sub path, with no leading "/" character * @return a string to use in paths + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ + @Deprecated public static String getTempPath(String subpath) { String prop = WINDOWS ? DEFAULT_TEST_DATA_PATH : System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_PATH); @@ -152,8 +165,11 @@ public static String getTempPath(String subpath) { * under the relative path {@link #DEFAULT_TEST_DATA_PATH} * * @return a string to use in paths + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ @SuppressWarnings("java:S2245") // no need for secure random + @Deprecated public static String getRandomizedTempPath() { return getTempPath(getCallerClass(GenericTestUtils.class).getSimpleName() + "-" + randomAlphanumeric(10)); @@ -205,6 +221,20 @@ public static void waitFor(BooleanSupplier check, int checkEveryMillis, } } + public static T assertThrows( + Class expectedType, + Callable func) { + return Assertions.assertThrows(expectedType, () -> { + final AutoCloseable closeable = func.call(); + try { + if (closeable != null) { + closeable.close(); + } + } catch (Exception ignored) { + } + }); + } + /** * @deprecated use sl4fj based version */ @@ -335,11 +365,11 @@ private static long monotonicNow() { * *
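The deprecation notes above point at JUnit 5's @TempDir, and the new assertThrows overload both asserts the expected exception and closes any AutoCloseable the callable still managed to create before the assertion fails. A hedged usage sketch; the helper's exact generic signature is not visible in this hunk, so it is assumed here:

// Sketch only; assumes GenericTestUtils.assertThrows is generic over the exception type
// and accepts a Callable that produces an AutoCloseable, closing the resource if it was created.
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.nio.file.Path;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirAndAssertThrowsExample {

  @TempDir
  Path testDir; // replaces the deprecated GenericTestUtils.getTestDir()/getRandomizedTestDir()

  @Test
  void openingMissingFileFails() {
    // The stream is an AutoCloseable; if it were unexpectedly created, the helper closes it.
    GenericTestUtils.assertThrows(FileNotFoundException.class,
        () -> new FileInputStream(testDir.resolve("does-not-exist").toFile()));
  }
}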

    * TODO: Add lambda support once Java 8 is common. - *

    +   * {@code
        *   SystemErrCapturer.withCapture(capture -> {
        *     ...
        *   })
    -   * 
    + * } */ public static class SystemErrCapturer implements AutoCloseable { private final ByteArrayOutputStream bytes; @@ -376,11 +406,11 @@ public void close() throws Exception { * *

    * TODO: Add lambda support once Java 8 is common. - *

    +   * {@code
        *   SystemOutCapturer.withCapture(capture -> {
        *     ...
        *   })
    -   * 
    + * } */ public static class SystemOutCapturer implements AutoCloseable { private final ByteArrayOutputStream bytes; @@ -475,8 +505,8 @@ public static final class ReflectionUtils { * This method provides the modifiers field using reflection approach which is compatible * for both pre Java 9 and post java 9 versions. * @return modifiers field - * @throws IllegalAccessException - * @throws NoSuchFieldException + * @throws IllegalAccessException illegalAccessException, + * @throws NoSuchFieldException noSuchFieldException. */ public static Field getModifiersField() throws IllegalAccessException, NoSuchFieldException { Field modifiersField = null; diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java index 661989dade1..d6b028c815f 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java @@ -77,11 +77,13 @@ public interface TimeoutHandler { * is called. This returns the exception passed in (if any), * or generates a new one. *
    +   * {@code
        * await(
        *   30 * 1000,
        *   () -> { return 0 == filesystem.listFiles(new Path("/")).length); },
        *   () -> 500),
        *   (timeout, ex) -> ex != null ? ex : new TimeoutException("timeout"));
    +   * }
        * 
    * * @param timeoutMillis timeout in milliseconds. @@ -160,9 +162,11 @@ public static int await(int timeoutMillis, *

    * Example: await for probe to succeed: *

    +   * {@code
        * await(
        *   30 * 1000, 500,
        *   () -> { return 0 == filesystem.listFiles(new Path("/")).length); });
    +   * }
        * 
    * * @param timeoutMillis timeout in milliseconds. diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java index f2c65d14961..f4651a408f7 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java @@ -51,7 +51,8 @@ * Copied from Hadoop and migrated to AssertJ. */ public final class MetricsAsserts { - + // workaround for HADOOP-19301. + private static final MutableQuantiles QUANTILES = new MutableQuantiles(); private static final Logger LOG = LoggerFactory.getLogger(MetricsAsserts.class); private static final Offset EPSILON = Offset.offset(0.00001); private static final Offset EPSILON_FLOAT = Offset.offset(0.00001f); @@ -411,7 +412,7 @@ public static void assertQuantileGauges(String prefix, public static void assertQuantileGauges(String prefix, MetricsRecordBuilder rb, String valueName) { verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L)); - for (Quantile q : MutableQuantiles.quantiles) { + for (Quantile q : QUANTILES.getQuantiles()) { String nameTemplate = prefix + "%dthPercentile" + valueName; int percentile = (int) (100 * q.quantile); verify(rb).addGauge( @@ -432,7 +433,7 @@ public static void assertQuantileGauges(String prefix, public static void assertInverseQuantileGauges(String prefix, MetricsRecordBuilder rb, String valueName) { verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L)); - for (Quantile q : MutableQuantiles.quantiles) { + for (Quantile q : QUANTILES.getQuantiles()) { String nameTemplate = prefix + "%dthInversePercentile" + valueName; int percentile = (int) (100 * q.quantile); verify(rb).addGauge( diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index daf6f3d40f4..5b77f394c96 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-tools - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Tools Apache Ozone HDDS Tools jar @@ -179,20 +179,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-compiler-plugin - - org.apache.ozone - hdds-config - ${hdds.version} - org.kohsuke.metainf-services metainf-services ${metainf-services.version} + + info.picocli + picocli-codegen + ${picocli.version} + - org.apache.hadoop.hdds.conf.ConfigFileGenerator org.kohsuke.metainf_services.AnnotationProcessorImpl + picocli.codegen.aot.graalvm.processor.NativeImageConfigGeneratorProcessor @@ -207,8 +207,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator + org.apache.hadoop.hdds.conf.Config + org.apache.hadoop.hdds.conf.ConfigGroup org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AdminSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AdminSubcommand.java new file mode 100644 index 00000000000..b03b75eb8a9 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AdminSubcommand.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +/** Marker interface for subcommands to be added to {@code OzoneAdmin}. */ +public interface AdminSubcommand { + // marker +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/DebugSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/DebugSubcommand.java new file mode 100644 index 00000000000..3915fd86843 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/DebugSubcommand.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +/** Marker interface for subcommands to be added to {@code OzoneDebug}. 
*/ +public interface DebugSubcommand { + // marker +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java index cc496a28e77..0c182d75e83 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java @@ -17,11 +17,7 @@ */ package org.apache.hadoop.hdds.cli; -import com.google.common.annotations.VisibleForTesting; -import java.io.IOException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine; @@ -33,41 +29,8 @@ description = "Developer tools for Ozone Admin operations", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneAdmin extends GenericCli { +public class OzoneAdmin extends GenericCli implements ExtensibleParentCommand { - private OzoneConfiguration ozoneConf; - - private UserGroupInformation user; - - public OzoneAdmin() { - super(OzoneAdmin.class); - } - - @VisibleForTesting - public OzoneAdmin(OzoneConfiguration conf) { - super(OzoneAdmin.class); - ozoneConf = conf; - } - - public OzoneConfiguration getOzoneConf() { - if (ozoneConf == null) { - ozoneConf = createOzoneConfiguration(); - } - return ozoneConf; - } - - public UserGroupInformation getUser() throws IOException { - if (user == null) { - user = UserGroupInformation.getCurrentUser(); - } - return user; - } - - /** - * Main for the Ozone Admin shell Command handling. - * - * @param argv - System Args Strings[] - */ public static void main(String[] argv) { new OzoneAdmin().run(argv); } @@ -79,4 +42,9 @@ public int execute(String[] argv) { return TracingUtil.executeInNewSpan(spanName, () -> super.execute(argv)); } + + @Override + public Class subcommandType() { + return AdminSubcommand.class; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/RepairSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/RepairSubcommand.java new file mode 100644 index 00000000000..1eb12b01253 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/RepairSubcommand.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +/** Marker interface for subcommands to be added to {@code OzoneRepair}. 
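With OzoneAdmin now advertising AdminSubcommand through subcommandType(), a new admin command registers itself by implementing the marker interface and publishing it via @MetaInfServices, exactly as the migrated commands further down in this patch do. A minimal hypothetical example of that pattern (the command name and class are invented):

// Hypothetical example of the registration pattern this patch migrates to.
package org.apache.hadoop.hdds.scm.cli;

import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.AdminSubcommand;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.kohsuke.MetaInfServices;
import picocli.CommandLine.Command;

@Command(name = "hello",
    description = "Hypothetical example subcommand",
    mixinStandardHelpOptions = true,
    versionProvider = HddsVersionProvider.class)
@MetaInfServices(AdminSubcommand.class)
public class HelloSubcommand implements Callable<Void>, AdminSubcommand {

  @Override
  public Void call() {
    // Picked up by 'ozone admin' at startup through the AdminSubcommand service entry.
    System.out.println("hello from a dynamically registered admin subcommand");
    return null;
  }
}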
*/ +public interface RepairSubcommand { + // marker +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java index 7f24d843b0f..2264f096a28 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdds.scm.cli; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; @@ -90,9 +89,8 @@ ContainerBalancerStopSubcommand.class, ContainerBalancerStatusSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ContainerBalancerCommands implements Callable, - SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ContainerBalancerCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -102,9 +100,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java index e58074bf140..9d7c270c962 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java @@ -19,9 +19,9 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfo; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.ozone.OzoneConsts; import picocli.CommandLine; @@ -31,10 +31,14 @@ import java.time.Duration; import java.time.Instant; import java.time.LocalDateTime; +import java.time.OffsetDateTime; import java.time.ZoneId; import java.util.List; import java.util.stream.Collectors; +import static org.apache.hadoop.hdds.util.DurationUtil.getPrettyDuration; +import static org.apache.hadoop.util.StringUtils.byteDesc; + /** * Handler to query status of container balancer. 
*/ @@ -58,27 +62,42 @@ public class ContainerBalancerStatusSubcommand extends ScmSubcommand { public void execute(ScmClient scmClient) throws IOException { ContainerBalancerStatusInfoResponseProto response = scmClient.getContainerBalancerStatusInfo(); boolean isRunning = response.getIsRunning(); - ContainerBalancerStatusInfo balancerStatusInfo = response.getContainerBalancerStatusInfo(); + ContainerBalancerStatusInfoProto balancerStatusInfo = response.getContainerBalancerStatusInfo(); if (isRunning) { + Instant startedAtInstant = Instant.ofEpochSecond(balancerStatusInfo.getStartedAt()); LocalDateTime dateTime = - LocalDateTime.ofInstant(Instant.ofEpochSecond(balancerStatusInfo.getStartedAt()), ZoneId.systemDefault()); + LocalDateTime.ofInstant(startedAtInstant, ZoneId.systemDefault()); System.out.println("ContainerBalancer is Running."); if (verbose) { - System.out.printf("Started at: %s %s%n%n", dateTime.toLocalDate(), dateTime.toLocalTime()); + System.out.printf("Started at: %s %s%n", dateTime.toLocalDate(), dateTime.toLocalTime()); + Duration balancingDuration = Duration.between(startedAtInstant, OffsetDateTime.now()); + System.out.printf("Balancing duration: %s%n%n", getPrettyDuration(balancingDuration)); System.out.println(getConfigurationPrettyString(balancerStatusInfo.getConfiguration())); - List iterationsStatusInfoList + List iterationsStatusInfoList = balancerStatusInfo.getIterationsStatusInfoList(); System.out.println("Current iteration info:"); - System.out.println( - getPrettyIterationStatusInfo(iterationsStatusInfoList.get(iterationsStatusInfoList.size() - 1)) - ); + ContainerBalancerTaskIterationStatusInfoProto currentIterationStatistic = iterationsStatusInfoList.stream() + .filter(it -> it.getIterationResult().isEmpty()) + .findFirst() + .orElse(null); + if (currentIterationStatistic == null) { + System.out.println("-\n"); + } else { + System.out.println( + getPrettyIterationStatusInfo(currentIterationStatistic) + ); + } + if (verboseWithHistory) { System.out.println("Iteration history list:"); System.out.println( - iterationsStatusInfoList.stream().map(this::getPrettyIterationStatusInfo) + iterationsStatusInfoList + .stream() + .filter(it -> !it.getIterationResult().isEmpty()) + .map(this::getPrettyIterationStatusInfo) .collect(Collectors.joining("\n")) ); } @@ -134,21 +153,28 @@ String getConfigurationPrettyString(HddsProtos.ContainerBalancerConfigurationPro configuration.getExcludeDatanodes().isEmpty() ? 
"None" : configuration.getExcludeDatanodes()); } - private String getPrettyIterationStatusInfo(ContainerBalancerTaskIterationStatusInfo iterationStatusInfo) { + private String getPrettyIterationStatusInfo(ContainerBalancerTaskIterationStatusInfoProto iterationStatusInfo) { int iterationNumber = iterationStatusInfo.getIterationNumber(); String iterationResult = iterationStatusInfo.getIterationResult(); - long sizeScheduledForMove = iterationStatusInfo.getSizeScheduledForMoveGB(); - long dataSizeMovedGB = iterationStatusInfo.getDataSizeMovedGB(); + long iterationDuration = iterationStatusInfo.getIterationDuration(); + long sizeScheduledForMove = iterationStatusInfo.getSizeScheduledForMove(); + long dataSizeMoved = iterationStatusInfo.getDataSizeMoved(); long containerMovesScheduled = iterationStatusInfo.getContainerMovesScheduled(); long containerMovesCompleted = iterationStatusInfo.getContainerMovesCompleted(); long containerMovesFailed = iterationStatusInfo.getContainerMovesFailed(); long containerMovesTimeout = iterationStatusInfo.getContainerMovesTimeout(); - String enteringDataNodeList = iterationStatusInfo.getSizeEnteringNodesGBList() - .stream().map(nodeInfo -> nodeInfo.getUuid() + " <- " + nodeInfo.getDataVolumeGB() + "\n") + String enteringDataNodeList = iterationStatusInfo.getSizeEnteringNodesList() + .stream().map(nodeInfo -> nodeInfo.getUuid() + " <- " + byteDesc(nodeInfo.getDataVolume()) + "\n") .collect(Collectors.joining()); - String leavingDataNodeList = iterationStatusInfo.getSizeLeavingNodesGBList() - .stream().map(nodeInfo -> nodeInfo.getUuid() + " -> " + nodeInfo.getDataVolumeGB() + "\n") + if (enteringDataNodeList.isEmpty()) { + enteringDataNodeList = " -\n"; + } + String leavingDataNodeList = iterationStatusInfo.getSizeLeavingNodesList() + .stream().map(nodeInfo -> nodeInfo.getUuid() + " -> " + byteDesc(nodeInfo.getDataVolume()) + "\n") .collect(Collectors.joining()); + if (leavingDataNodeList.isEmpty()) { + leavingDataNodeList = " -\n"; + } return String.format( "%-50s %s%n" + "%-50s %s%n" + @@ -159,14 +185,16 @@ private String getPrettyIterationStatusInfo(ContainerBalancerTaskIterationStatus "%-50s %s%n" + "%-50s %s%n" + "%-50s %s%n" + + "%-50s %s%n" + "%-50s %n%s" + "%-50s %n%s", "Key", "Value", - "Iteration number", iterationNumber, + "Iteration number", iterationNumber == 0 ? "-" : iterationNumber, + "Iteration duration", getPrettyDuration(Duration.ofSeconds(iterationDuration)), "Iteration result", - iterationResult.isEmpty() ? "IN_PROGRESS" : iterationResult, - "Size scheduled to move", sizeScheduledForMove, - "Moved data size", dataSizeMovedGB, + iterationResult.isEmpty() ? 
"-" : iterationResult, + "Size scheduled to move", byteDesc(sizeScheduledForMove), + "Moved data size", byteDesc(dataSizeMoved), "Scheduled to move containers", containerMovesScheduled, "Already moved containers", containerMovesCompleted, "Failed to move containers", containerMovesFailed, @@ -174,5 +202,6 @@ private String getPrettyIterationStatusInfo(ContainerBalancerTaskIterationStatus "Entered data to nodes", enteringDataNodeList, "Exited data from nodes", leavingDataNodeList); } + } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 98d8bb0d83e..cf9ce1ca5d3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -83,6 +84,7 @@ public class ContainerOperationClient implements ScmClient { private final boolean containerTokenEnabled; private final OzoneConfiguration configuration; private XceiverClientManager xceiverClientManager; + private int maxCountOfContainerList; public synchronized XceiverClientManager getXceiverClientManager() throws IOException { @@ -110,13 +112,16 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { } containerTokenEnabled = conf.getBoolean(HDDS_CONTAINER_TOKEN_ENABLED, HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT); + maxCountOfContainerList = conf + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); } private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) throws IOException { XceiverClientManager manager; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, conf); + CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(conf); manager = new XceiverClientManager(conf, conf.getObject(XceiverClientManager.ScmClientConfig.class), new ClientTrustManager(caCerts, null)); @@ -339,17 +344,29 @@ public void deleteContainer(long containerID, boolean force) } @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { + if (count > maxCountOfContainerList) { + LOG.warn("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count); } @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { + if (count > maxCountOfContainerList) { + LOG.warn("Attempting to list {} containers. 
However, this exceeds" + + " the cluster's current limit of {}. The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count, state, repType, replicationConfig); } @@ -519,6 +536,11 @@ public List getScmRatisRoles() throws IOException { return storageContainerLocationClient.getScmInfo().getRatisPeerRoles(); } + @Override + public boolean isScmRatisEnable() throws IOException { + return storageContainerLocationClient.getScmInfo().getScmRatisEnabled(); + } + @Override public boolean rotateSecretKeys(boolean force) throws IOException { return secretKeyClient.checkAndRotate(force); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java index cd5aba3a82e..a16e5227514 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java @@ -19,10 +19,9 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -42,9 +41,8 @@ ReplicationManagerStopSubcommand.class, ReplicationManagerStatusSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ReplicationManagerCommands implements Callable, - SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ReplicationManagerCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -54,9 +52,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java index 6ba7cf29547..49f73e6faea 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java @@ -19,10 +19,9 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -42,8 +41,8 @@ SafeModeExitSubcommand.class, SafeModeWaitSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class SafeModeCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class SafeModeCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -53,9 +52,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java index 9ac275fd5cb..72bca506939 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java @@ -33,9 +33,8 @@ import com.fasterxml.jackson.databind.JsonSerializer; import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -56,9 +55,9 @@ description = "Print a tree of the network topology as reported by SCM", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) +@MetaInfServices(AdminSubcommand.class) public class TopologySubcommand extends ScmSubcommand - implements SubcommandWithParent { + implements AdminSubcommand { private static final List STATES = new ArrayList<>(); @@ -137,11 +136,6 @@ public void execute(ScmClient scmClient) throws IOException { } } - @Override - public Class getParentType() { - return OzoneAdmin.class; - } - // Format // Location: rack1 // ipAddress(hostName) OperationalState diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java index d466c9554ad..211e3bb0925 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java @@ -19,10 +19,9 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -43,8 +42,8 @@ CleanExpiredCertsSubcommand.class, }) -@MetaInfServices(SubcommandWithParent.class) -public class CertCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class CertCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -54,9 +53,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index 9f93c56f2db..89522ded68c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -19,13 +19,14 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import 
org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -46,20 +47,22 @@ UpgradeSubcommand.class, ReconcileSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ContainerCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ContainerCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; + @ParentCommand + private OzoneAdmin parent; + @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - @Override - public Class getParentType() { - return OzoneAdmin.class; + public OzoneAdmin getParent() { + return parent; } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index ecc43d04087..88ccef702b3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.cli.container; import java.io.IOException; -import java.util.List; import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; @@ -26,7 +25,9 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -37,6 +38,7 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -55,10 +57,15 @@ public class ListSubcommand extends ScmSubcommand { private long startId; @Option(names = {"-c", "--count"}, - description = "Maximum number of containers to list", + description = "Maximum number of containers to list.", defaultValue = "20", showDefaultValue = Visibility.ALWAYS) private int count; + @Option(names = {"-a", "--all"}, + description = "List all containers.", + defaultValue = "false") + private boolean all; + @Option(names = {"--state"}, description = "Container state(OPEN, CLOSING, QUASI_CLOSED, CLOSED, " + "DELETING, DELETED)") @@ -75,6 +82,9 @@ public class ListSubcommand extends ScmSubcommand { private static final ObjectWriter WRITER; + @ParentCommand + private ContainerCommands parent; + static { ObjectMapper mapper = new ObjectMapper() .registerModule(new JavaTimeModule()) @@ -105,12 +115,49 @@ public void execute(ScmClient scmClient) throws IOException { ReplicationType.fromProto(type), replication, new OzoneConfiguration()); } - List containerList = - scmClient.listContainer(startId, count, state, type, repConfig); - // Output data list - for (ContainerInfo container : containerList) { - outputContainerInfo(container); + 
int maxCountAllowed = parent.getParent().getOzoneConf() + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + + ContainerListResult containerListAndTotalCount; + + if (!all) { + if (count > maxCountAllowed) { + System.err.printf("Attempting to list the first %d records of containers." + + " However it exceeds the cluster's current limit of %d. The results will be capped at the" + + " maximum allowed count.%n", count, ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + count = maxCountAllowed; + } + containerListAndTotalCount = scmClient.listContainer(startId, count, state, type, repConfig); + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { + outputContainerInfo(container); + } + + if (containerListAndTotalCount.getTotalCount() > count) { + System.err.printf("Displaying %d out of %d containers. " + + "Container list has more containers.%n", + count, containerListAndTotalCount.getTotalCount()); + } + } else { + // Batch size is either count passed through cli or maxCountAllowed + int batchSize = (count > 0) ? count : maxCountAllowed; + long currentStartId = startId; + int fetchedCount; + + do { + // Fetch containers in batches of 'batchSize' + containerListAndTotalCount = scmClient.listContainer(currentStartId, batchSize, state, type, repConfig); + fetchedCount = containerListAndTotalCount.getContainerInfoList().size(); + + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { + outputContainerInfo(container); + } + + if (fetchedCount > 0) { + currentStartId = containerListAndTotalCount.getContainerInfoList().get(fetchedCount - 1).getContainerID() + 1; + } + } while (fetchedCount > 0); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index 8cb2114f57d..6c020e46f37 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; @@ -41,10 +40,11 @@ DecommissionSubCommand.class, MaintenanceSubCommand.class, RecommissionSubCommand.class, + StatusSubCommand.class, UsageInfoSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class DatanodeCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class DatanodeCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -54,9 +54,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java index 9edcd3425a0..b33a5d1ea96 100644 --- 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Command; import java.util.concurrent.Callable; @@ -37,8 +35,7 @@ DecommissionStatusSubCommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class StatusSubCommand implements Callable, SubcommandWithParent { +public class StatusSubCommand implements Callable { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; @@ -48,9 +45,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return DatanodeCommands.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index b967fa0658c..2c069291a86 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -155,6 +155,8 @@ private void printInfo(DatanodeUsage info) { + " B", StringUtils.byteDesc(info.getRemaining())); System.out.printf("%-13s: %s %n", "Remaining %", PERCENT_FORMAT.format(info.getRemainingRatio())); + System.out.printf("%-13s: %d %n", "Pipeline(s)", + info.getPipelineCount()); System.out.printf("%-13s: %d %n", "Container(s)", info.getContainerCount()); System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated", @@ -192,6 +194,7 @@ private static class DatanodeUsage { private long committed = 0; private long freeSpaceToSpare = 0; private long containerCount = 0; + private long pipelineCount = 0; DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) { if (proto.hasNode()) { @@ -212,6 +215,9 @@ private static class DatanodeUsage { if (proto.hasContainerCount()) { containerCount = proto.getContainerCount(); } + if (proto.hasPipelineCount()) { + pipelineCount = proto.getPipelineCount(); + } if (proto.hasFreeSpaceToSpare()) { freeSpaceToSpare = proto.getFreeSpaceToSpare(); } @@ -277,5 +283,8 @@ public double getRemainingRatio() { return remaining / (double) capacity; } + public long getPipelineCount() { + return pipelineCount; + } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java index 7c70456995b..e5392ef618d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java @@ -59,12 +59,22 @@ public void execute(ScmClient scmClient) throws IOException { List pipelineList = new ArrayList<>(); Predicate predicate = replicationFilter.orElse(null); - for (Pipeline pipeline : scmClient.listPipelines()) { - boolean filterPassed = (predicate != null) && predicate.test(pipeline); - if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { - pipelineList.add(pipeline); + List pipelines = 
scmClient.listPipelines(); + if (predicate == null) { + for (Pipeline pipeline : pipelines) { + if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED) { + pipelineList.add(pipeline); + } + } + } else { + for (Pipeline pipeline : pipelines) { + boolean filterPassed = predicate.test(pipeline); + if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { + pipelineList.add(pipeline); + } } } + System.out.println("Sending close command for " + pipelineList.size() + " pipelines..."); pipelineList.forEach(pipeline -> { try { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java index ba7371e6214..9c391035560 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java @@ -19,10 +19,9 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -44,8 +43,8 @@ CreatePipelineSubcommand.class, ClosePipelineSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class PipelineCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class PipelineCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -55,9 +54,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/DurationUtil.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/DurationUtil.java new file mode 100644 index 00000000000..7b2ded9b13d --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/DurationUtil.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.util; + +import java.time.Duration; + +import static java.lang.String.format; + +/** + * Pretty duration string representation. + */ +public final class DurationUtil { + + private DurationUtil() { + } + + /** + * Modify duration to string view. E.x. 1h 30m 45s, 2m 30s, 30s. + * + * @param duration duration + * @return duration in string format + */ + public static String getPrettyDuration(Duration duration) { + long hours = duration.toHours(); + long minutes = duration.getSeconds() / 60 % 60; + long seconds = duration.getSeconds() % 60; + if (hours > 0) { + return format("%dh %dm %ds", hours, minutes, seconds); + } else if (minutes > 0) { + return format("%dm %ds", minutes, seconds); + } else if (seconds >= 0) { + return format("%ds", seconds); + } else { + throw new IllegalStateException("Provided duration is incorrect: " + duration); + } + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/package-info.java similarity index 78% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java rename to hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/package-info.java index 8421ab19cfa..6dd25c12c53 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/package-info.java @@ -14,17 +14,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + *

    */ -package org.apache.hadoop.hdds.cli; - /** - * Defineds parent command for SPI based subcommand registration. + * SCM related cli utils. */ -public interface SubcommandWithParent { - - /** - * Java type of the parent command. - */ - Class getParentType(); - -} +package org.apache.hadoop.hdds.util; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java index 41b419d2326..bdce0f5d707 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.scm.cli.datanode; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfo; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; import org.apache.hadoop.hdds.scm.cli.ContainerBalancerStartSubcommand; import org.apache.hadoop.hdds.scm.cli.ContainerBalancerStatusSubcommand; @@ -28,6 +28,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import picocli.CommandLine; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -39,6 +40,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -58,175 +61,393 @@ class TestContainerBalancerSubCommand { private ContainerBalancerStartSubcommand startCmd; private ContainerBalancerStatusSubcommand statusCmd; - @BeforeEach - public void setup() throws UnsupportedEncodingException { - stopCmd = new ContainerBalancerStopSubcommand(); - startCmd = new ContainerBalancerStartSubcommand(); - statusCmd = new ContainerBalancerStatusSubcommand(); - System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); - System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); - } - - @AfterEach - public void tearDown() { - System.setOut(originalOut); - System.setErr(originalErr); - } - - @Test - public void testContainerBalancerStatusInfoSubcommandRunning() - throws IOException { - ScmClient scmClient = mock(ScmClient.class); - - ContainerBalancerConfiguration config = new ContainerBalancerConfiguration(); - config.setThreshold(10); - config.setMaxDatanodesPercentageToInvolvePerIteration(20); - config.setMaxSizeToMovePerIteration(53687091200L); - config.setMaxSizeEnteringTarget(27917287424L); - config.setMaxSizeLeavingSource(27917287424L); - config.setIterations(2); - config.setExcludeNodes(""); - config.setMoveTimeout(3900000); - config.setMoveReplicationTimeout(3000000); - config.setBalancingInterval(0); - config.setIncludeNodes(""); - config.setExcludeNodes(""); - config.setNetworkTopologyEnable(false); - config.setTriggerDuEnable(false); - - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo 
iteration0StatusInfo = - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo.newBuilder() - .setIterationNumber(0) + private static ContainerBalancerStatusInfoResponseProto getContainerBalancerStatusInfoResponseProto( + ContainerBalancerConfiguration config) { + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto iteration1StatusInfo = + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto.newBuilder() + .setIterationNumber(1) .setIterationResult("ITERATION_COMPLETED") - .setSizeScheduledForMoveGB(48) - .setDataSizeMovedGB(48) + .setIterationDuration(400L) + .setSizeScheduledForMove(54 * GB) + .setDataSizeMoved(54 * GB) .setContainerMovesScheduled(11) .setContainerMovesCompleted(11) .setContainerMovesFailed(0) .setContainerMovesTimeout(0) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("80f6bc27-e6f3-493e-b1f4-25f810ad960d") - .setDataVolumeGB(27) + .setDataVolume(28 * GB) .build() ) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("701ca98e-aa1a-4b36-b817-e28ed634bba6") - .setDataVolumeGB(23L) + .setDataVolume(26 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("b8b9c511-c30f-4933-8938-2f272e307070") - .setDataVolumeGB(24L) + .setDataVolume(25 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("7bd99815-47e7-4015-bc61-ca6ef6dfd130") - .setDataVolumeGB(26L) + .setDataVolume(29 * GB) .build() ) .build(); - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo iteration1StatusInfo = - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo.newBuilder() - .setIterationNumber(1) + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto iteration2StatusInfo = + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto.newBuilder() + .setIterationNumber(2) .setIterationResult("ITERATION_COMPLETED") - .setSizeScheduledForMoveGB(48) - .setDataSizeMovedGB(48) - .setContainerMovesScheduled(11) - .setContainerMovesCompleted(11) + .setIterationDuration(300L) + .setSizeScheduledForMove(30 * GB) + .setDataSizeMoved(30 * GB) + .setContainerMovesScheduled(8) + .setContainerMovesCompleted(8) .setContainerMovesFailed(0) .setContainerMovesTimeout(0) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("80f6bc27-e6f3-493e-b1f4-25f810ad960d") - .setDataVolumeGB(27L) + .setDataVolume(20 * GB) .build() ) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("701ca98e-aa1a-4b36-b817-e28ed634bba6") - .setDataVolumeGB(23L) + .setDataVolume(10 * GB) .build() ) - 
.addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("b8b9c511-c30f-4933-8938-2f272e307070") - .setDataVolumeGB(24L) + .setDataVolume(15 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("7bd99815-47e7-4015-bc61-ca6ef6dfd130") - .setDataVolumeGB(26L) + .setDataVolume(15 * GB) .build() ) .build(); - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo iteration2StatusInfo = - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo.newBuilder() - .setIterationNumber(1) + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto iteration3StatusInfo = + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto.newBuilder() + .setIterationNumber(3) .setIterationResult("") - .setSizeScheduledForMoveGB(48) - .setDataSizeMovedGB(48) - .setContainerMovesScheduled(11) - .setContainerMovesCompleted(11) + .setIterationDuration(370L) + .setSizeScheduledForMove(48 * GB) + .setDataSizeMoved(48 * GB) + .setContainerMovesScheduled(5) + .setContainerMovesCompleted(5) .setContainerMovesFailed(0) .setContainerMovesTimeout(0) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("80f6bc27-e6f3-493e-b1f4-25f810ad960d") - .setDataVolumeGB(27L) + .setDataVolume(20 * GB) .build() ) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("701ca98e-aa1a-4b36-b817-e28ed634bba6") - .setDataVolumeGB(23L) + .setDataVolume(28 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("b8b9c511-c30f-4933-8938-2f272e307070") - .setDataVolumeGB(24L) + .setDataVolume(30 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("7bd99815-47e7-4015-bc61-ca6ef6dfd130") - .setDataVolumeGB(26L) + .setDataVolume(18 * GB) .build() ) .build(); - ContainerBalancerStatusInfoResponseProto statusInfoResponseProto = - ContainerBalancerStatusInfoResponseProto.newBuilder() + return ContainerBalancerStatusInfoResponseProto.newBuilder() .setIsRunning(true) - .setContainerBalancerStatusInfo(ContainerBalancerStatusInfo.newBuilder() + .setContainerBalancerStatusInfo(ContainerBalancerStatusInfoProto.newBuilder() .setStartedAt(OffsetDateTime.now().toEpochSecond()) .setConfiguration(config.toProtobufBuilder().setShouldRun(true)) .addAllIterationsStatusInfo( - Arrays.asList(iteration0StatusInfo, iteration1StatusInfo, iteration2StatusInfo) + Arrays.asList(iteration1StatusInfo, iteration2StatusInfo, iteration3StatusInfo) ) ) .build(); + } + + private static ContainerBalancerConfiguration getContainerBalancerConfiguration() { + ContainerBalancerConfiguration config = new ContainerBalancerConfiguration(); + 
config.setThreshold(10); + config.setMaxDatanodesPercentageToInvolvePerIteration(20); + config.setMaxSizeToMovePerIteration(53687091200L); + config.setMaxSizeEnteringTarget(27917287424L); + config.setMaxSizeLeavingSource(27917287424L); + config.setIterations(3); + config.setExcludeNodes(""); + config.setMoveTimeout(3900000); + config.setMoveReplicationTimeout(3000000); + config.setBalancingInterval(0); + config.setIncludeNodes(""); + config.setExcludeNodes(""); + config.setNetworkTopologyEnable(false); + config.setTriggerDuEnable(false); + return config; + } + + @BeforeEach + public void setup() throws UnsupportedEncodingException { + stopCmd = new ContainerBalancerStopSubcommand(); + startCmd = new ContainerBalancerStartSubcommand(); + statusCmd = new ContainerBalancerStatusSubcommand(); + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + } + + @AfterEach + public void tearDown() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + @Test + void testContainerBalancerStatusInfoSubcommandRunningWithoutFlags() + throws IOException { + ScmClient scmClient = mock(ScmClient.class); + + ContainerBalancerConfiguration config = + getContainerBalancerConfiguration(); + + ContainerBalancerStatusInfoResponseProto + statusInfoResponseProto = getContainerBalancerStatusInfoResponseProto(config); //test status is running when(scmClient.getContainerBalancerStatusInfo()).thenReturn(statusInfoResponseProto); - statusCmd.execute(scmClient); Pattern p = Pattern.compile( "^ContainerBalancer\\sis\\sRunning."); - Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + String output = outContent.toString(DEFAULT_ENCODING); + Matcher m = p.matcher(output); assertTrue(m.find()); + + String balancerConfigOutput = + "Container Balancer Configuration values:\n" + + "Key Value\n" + + "Threshold 10.0\n" + + "Max Datanodes to Involve per Iteration(percent) 20\n" + + "Max Size to Move per Iteration 0GB\n" + + "Max Size Entering Target per Iteration 26GB\n" + + "Max Size Leaving Source per Iteration 26GB\n" + + "Number of Iterations 3\n" + + "Time Limit for Single Container's Movement 65min\n" + + "Time Limit for Single Container's Replication 50min\n" + + "Interval between each Iteration 0min\n" + + "Whether to Enable Network Topology false\n" + + "Whether to Trigger Refresh Datanode Usage Info false\n" + + "Container IDs to Exclude from Balancing None\n" + + "Datanodes Specified to be Balanced None\n" + + "Datanodes Excluded from Balancing None"; + assertFalse(output.contains(balancerConfigOutput)); + + String currentIterationOutput = + "Current iteration info:\n" + + "Key Value\n" + + "Iteration number 3\n" + + "Iteration duration 1h 6m 40s\n" + + "Iteration result IN_PROGRESS\n" + + "Size scheduled to move 48 GB\n" + + "Moved data size 48 GB\n" + + "Scheduled to move containers 11\n" + + "Already moved containers 11\n" + + "Failed to move containers 0\n" + + "Failed to move containers by timeout 0\n" + + "Entered data to nodes \n" + + "80f6bc27-e6f3-493e-b1f4-25f810ad960d <- 20 GB\n" + + "701ca98e-aa1a-4b36-b817-e28ed634bba6 <- 28 GB\n" + + "Exited data from nodes \n" + + "b8b9c511-c30f-4933-8938-2f272e307070 -> 30 GB\n" + + "7bd99815-47e7-4015-bc61-ca6ef6dfd130 -> 18 GB"; + assertFalse(output.contains(currentIterationOutput)); + + assertFalse(output.contains("Iteration history list:")); } @Test - public void testContainerBalancerStatusInfoSubcommandRunningOnStoppedBalancer() + void 
testContainerBalancerStatusInfoSubcommandVerboseHistory() throws IOException { ScmClient scmClient = mock(ScmClient.class); + ContainerBalancerConfiguration config = + getContainerBalancerConfiguration(); + + ContainerBalancerStatusInfoResponseProto + statusInfoResponseProto = getContainerBalancerStatusInfoResponseProto(config); //test status is running + when(scmClient.getContainerBalancerStatusInfo()).thenReturn(statusInfoResponseProto); + CommandLine c = new CommandLine(statusCmd); + c.parseArgs("--verbose", "--history"); + statusCmd.execute(scmClient); + String output = outContent.toString(DEFAULT_ENCODING); + Pattern p = Pattern.compile( + "^ContainerBalancer\\sis\\sRunning.$", Pattern.MULTILINE); + Matcher m = p.matcher(output); + assertTrue(m.find()); + + p = Pattern.compile( + "^Started at: (\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2})$", Pattern.MULTILINE); + m = p.matcher(output); + assertTrue(m.find()); + + p = Pattern.compile( + "^Balancing duration: \\d{1}s$", Pattern.MULTILINE); + m = p.matcher(output); + assertTrue(m.find()); + + String balancerConfigOutput = + "Container Balancer Configuration values:\n" + + "Key Value\n" + + "Threshold 10.0\n" + + "Max Datanodes to Involve per Iteration(percent) 20\n" + + "Max Size to Move per Iteration 0GB\n" + + "Max Size Entering Target per Iteration 26GB\n" + + "Max Size Leaving Source per Iteration 26GB\n" + + "Number of Iterations 3\n" + + "Time Limit for Single Container's Movement 65min\n" + + "Time Limit for Single Container's Replication 50min\n" + + "Interval between each Iteration 0min\n" + + "Whether to Enable Network Topology false\n" + + "Whether to Trigger Refresh Datanode Usage Info false\n" + + "Container IDs to Exclude from Balancing None\n" + + "Datanodes Specified to be Balanced None\n" + + "Datanodes Excluded from Balancing None"; + assertTrue(output.contains(balancerConfigOutput)); + + assertTrue(output.contains("Iteration history list:")); + String firstHistoryIterationOutput = + "Key Value\n" + + "Iteration number 3\n" + + "Iteration duration 6m 10s\n" + + "Iteration result -\n" + + "Size scheduled to move 48 GB\n" + + "Moved data size 48 GB\n" + + "Scheduled to move containers 5\n" + + "Already moved containers 5\n" + + "Failed to move containers 0\n" + + "Failed to move containers by timeout 0\n" + + "Entered data to nodes \n" + + "80f6bc27-e6f3-493e-b1f4-25f810ad960d <- 20 GB\n" + + "701ca98e-aa1a-4b36-b817-e28ed634bba6 <- 28 GB\n" + + "Exited data from nodes \n" + + "b8b9c511-c30f-4933-8938-2f272e307070 -> 30 GB\n" + + "7bd99815-47e7-4015-bc61-ca6ef6dfd130 -> 18 GB"; + assertTrue(output.contains(firstHistoryIterationOutput)); + + String secondHistoryIterationOutput = + "Key Value\n" + + "Iteration number 2\n" + + "Iteration duration 5m 0s\n" + + "Iteration result ITERATION_COMPLETED\n" + + "Size scheduled to move 30 GB\n" + + "Moved data size 30 GB\n" + + "Scheduled to move containers 8\n" + + "Already moved containers 8\n" + + "Failed to move containers 0\n" + + "Failed to move containers by timeout 0\n" + + "Entered data to nodes \n" + + "80f6bc27-e6f3-493e-b1f4-25f810ad960d <- 20 GB\n" + + "701ca98e-aa1a-4b36-b817-e28ed634bba6 <- 10 GB\n" + + "Exited data from nodes \n" + + "b8b9c511-c30f-4933-8938-2f272e307070 -> 15 GB\n" + + "7bd99815-47e7-4015-bc61-ca6ef6dfd130 -> 15 GB"; + assertTrue(output.contains(secondHistoryIterationOutput)); + } + + @Test + void testContainerBalancerStatusInfoSubcommandVerbose() + throws IOException { + ScmClient scmClient = mock(ScmClient.class); + + ContainerBalancerConfiguration 
config = + getContainerBalancerConfiguration(); + + ContainerBalancerStatusInfoResponseProto + statusInfoResponseProto = getContainerBalancerStatusInfoResponseProto(config); + //test status is running + when(scmClient.getContainerBalancerStatusInfo()).thenReturn(statusInfoResponseProto); + CommandLine c = new CommandLine(statusCmd); + c.parseArgs("--verbose"); + statusCmd.execute(scmClient); + String output = outContent.toString(DEFAULT_ENCODING); + Pattern p = Pattern.compile( + "^ContainerBalancer\\sis\\sRunning.$", Pattern.MULTILINE); + Matcher m = p.matcher(output); + assertTrue(m.find()); + + p = Pattern.compile( + "^Started at: (\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2})$", Pattern.MULTILINE); + m = p.matcher(output); + assertTrue(m.find()); + + p = Pattern.compile( + "^Balancing duration: \\d{1}s$", Pattern.MULTILINE); + m = p.matcher(output); + assertTrue(m.find()); + + String balancerConfigOutput = + "Container Balancer Configuration values:\n" + + "Key Value\n" + + "Threshold 10.0\n" + + "Max Datanodes to Involve per Iteration(percent) 20\n" + + "Max Size to Move per Iteration 0GB\n" + + "Max Size Entering Target per Iteration 26GB\n" + + "Max Size Leaving Source per Iteration 26GB\n" + + "Number of Iterations 3\n" + + "Time Limit for Single Container's Movement 65min\n" + + "Time Limit for Single Container's Replication 50min\n" + + "Interval between each Iteration 0min\n" + + "Whether to Enable Network Topology false\n" + + "Whether to Trigger Refresh Datanode Usage Info false\n" + + "Container IDs to Exclude from Balancing None\n" + + "Datanodes Specified to be Balanced None\n" + + "Datanodes Excluded from Balancing None"; + assertTrue(output.contains(balancerConfigOutput)); + + String currentIterationOutput = + "Current iteration info:\n" + + "Key Value\n" + + "Iteration number 3\n" + + "Iteration duration 6m 10s\n" + + "Iteration result -\n" + + "Size scheduled to move 48 GB\n" + + "Moved data size 48 GB\n" + + "Scheduled to move containers 5\n" + + "Already moved containers 5\n" + + "Failed to move containers 0\n" + + "Failed to move containers by timeout 0\n" + + "Entered data to nodes \n" + + "80f6bc27-e6f3-493e-b1f4-25f810ad960d <- 20 GB\n" + + "701ca98e-aa1a-4b36-b817-e28ed634bba6 <- 28 GB\n" + + "Exited data from nodes \n" + + "b8b9c511-c30f-4933-8938-2f272e307070 -> 30 GB\n" + + "7bd99815-47e7-4015-bc61-ca6ef6dfd130 -> 18 GB"; + assertTrue(output.contains(currentIterationOutput)); + + assertFalse(output.contains("Iteration history list:")); + } + + @Test + void testContainerBalancerStatusInfoSubcommandRunningOnStoppedBalancer() + throws IOException { + ScmClient scmClient = mock(ScmClient.class); + + //test status is not running when(scmClient.getContainerBalancerStatusInfo()).thenReturn( ContainerBalancerStatusInfoResponseProto.newBuilder() .setIsRunning(false) diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java index 09f6621735e..a691e754606 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java @@ -94,6 +94,7 @@ public void testCorrectJsonValuesInReport() throws IOException { assertEquals(80.00, json.get(0).get("remainingPercent").doubleValue(), 0.001); assertEquals(5, json.get(0).get("containerCount").longValue()); + assertEquals(10, 
json.get(0).get("pipelineCount").longValue()); } @Test @@ -122,6 +123,7 @@ public void testOutputDataFieldsAligning() throws IOException { assertThat(output).contains("Remaining :"); assertThat(output).contains("Remaining % :"); assertThat(output).contains("Container(s) :"); + assertThat(output).contains("Pipeline(s) :"); assertThat(output).contains("Container Pre-allocated :"); assertThat(output).contains("Remaining Allocatable :"); assertThat(output).contains("Free Space To Spare :"); @@ -135,6 +137,7 @@ private List getUsageProto() { .setRemaining(80) .setUsed(10) .setContainerCount(5) + .setPipelineCount(10) .build()); return result; } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java new file mode 100644 index 00000000000..013350fe871 --- /dev/null +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.pipeline; + +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import picocli.CommandLine; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for the ClosePipelineSubcommand class. 
+ */ +class TestClosePipelinesSubCommand { + + private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); + private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); + private final PrintStream originalOut = System.out; + private final PrintStream originalErr = System.err; + private ClosePipelineSubcommand cmd; + private ScmClient scmClient; + + public static Stream values() { + return Stream.of( + arguments( + new String[]{"--all"}, + "Sending close command for 2 pipelines...\n", + "with empty parameters" + ), + arguments( + new String[]{"--all", "-ffc", "THREE"}, + "Sending close command for 1 pipelines...\n", + "by filter factor, opened" + ), + arguments( + new String[]{"--all", "-ffc", "ONE"}, + "Sending close command for 0 pipelines...\n", + "by filter factor, closed" + ), + arguments( + new String[]{"--all", "-r", "rs-3-2-1024k", "-t", "EC"}, + "Sending close command for 1 pipelines...\n", + "by replication and type, opened" + ), + arguments( + new String[]{"--all", "-r", "rs-6-3-1024k", "-t", "EC"}, + "Sending close command for 0 pipelines...\n", + "by replication and type, closed" + ), + arguments( + new String[]{"--all", "-t", "EC"}, + "Sending close command for 1 pipelines...\n", + "by type, opened" + ), + arguments( + new String[]{"--all", "-t", "RS"}, + "Sending close command for 0 pipelines...\n", + "by type, closed" + ) + ); + } + + @BeforeEach + public void setup() throws IOException { + cmd = new ClosePipelineSubcommand(); + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + + scmClient = mock(ScmClient.class); + when(scmClient.listPipelines()).thenAnswer(invocation -> createPipelines()); + } + + @AfterEach + public void tearDown() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + @ParameterizedTest(name = "{index}. 
{2}") + @MethodSource("values") + void testCloseAllPipelines(String[] commands, String expectedOutput, String testName) throws IOException { + CommandLine c = new CommandLine(cmd); + c.parseArgs(commands); + cmd.execute(scmClient); + assertEquals(expectedOutput, outContent.toString(DEFAULT_ENCODING)); + } + + private List createPipelines() { + List pipelines = new ArrayList<>(); + pipelines.add(createPipeline(StandaloneReplicationConfig.getInstance(ONE), + Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE), + Pipeline.PipelineState.OPEN)); + pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE), + Pipeline.PipelineState.CLOSED)); + + pipelines.add(createPipeline( + new ECReplicationConfig(3, 2), Pipeline.PipelineState.OPEN)); + pipelines.add(createPipeline( + new ECReplicationConfig(3, 2), Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline( + new ECReplicationConfig(6, 3), Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline( + RatisReplicationConfig.getInstance(THREE), Pipeline.PipelineState.CLOSED)); + return pipelines; + } + + private Pipeline createPipeline(ReplicationConfig repConfig, + Pipeline.PipelineState state) { + return new Pipeline.Builder() + .setId(PipelineID.randomId()) + .setCreateTimestamp(System.currentTimeMillis()) + .setState(state) + .setReplicationConfig(repConfig) + .setNodes(createDatanodeDetails(1)) + .build(); + } + + private List createDatanodeDetails(int count) { + List dns = new ArrayList<>(); + for (int i = 0; i < count; i++) { + HddsProtos.DatanodeDetailsProto dnd = + HddsProtos.DatanodeDetailsProto.newBuilder() + .setHostName("host" + i) + .setIpAddress("1.2.3." + i + 1) + .setNetworkLocation("/default") + .setNetworkName("host" + i) + .addPorts(HddsProtos.Port.newBuilder() + .setName("ratis").setValue(5678).build()) + .setUuid(UUID.randomUUID().toString()) + .build(); + dns.add(DatanodeDetails.getFromProtoBuf(dnd)); + } + return dns; + } +} diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/util/TestDurationUtil.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/util/TestDurationUtil.java new file mode 100644 index 00000000000..7b0a9548639 --- /dev/null +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/util/TestDurationUtil.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.util; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.time.Duration; +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +class TestDurationUtil { + + private static Stream paramsForPositiveCases() { + return Stream.of( + arguments( + "0s", + Duration.ZERO + ), + arguments( + "2562047788015215h 30m 7s", + Duration.ofSeconds(Long.MAX_VALUE) + ), + arguments( + "1s", + Duration.ofSeconds(1) + ), + arguments( + "30s", + Duration.ofSeconds(30) + ), + arguments( + "1m 0s", + Duration.ofMinutes(1) + ), + arguments( + "2m 30s", + Duration.ofMinutes(2).plusSeconds(30) + ), + arguments( + "1h 30m 45s", + Duration.ofHours(1).plusMinutes(30).plusSeconds(45) + ), + arguments( + "24h 0m 0s", + Duration.ofDays(1) + ), + arguments( + "48h 0m 0s", + Duration.ofDays(2) + ) + ); + } + + private static Collection paramsForNegativeCases() { + return Arrays.asList(Duration.ofSeconds(-1L), Duration.ofSeconds(Long.MIN_VALUE)); + } + + @ParameterizedTest + @MethodSource("paramsForPositiveCases") + void testDuration(String expected, Duration actual) { + assertEquals(expected, DurationUtil.getPrettyDuration(actual)); + } + + @ParameterizedTest + @MethodSource("paramsForNegativeCases") + void testDuration(Duration param) { + assertThrows(IllegalStateException.class, () -> DurationUtil.getPrettyDuration(param)); + } +} + diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 545faba51ac..6b5a1ac0c8b 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Client Apache Ozone Client jar diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 65dce09cba1..56ca8798f22 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -217,7 +217,7 @@ public S3SecretValue getS3Secret(String kerberosID, boolean createIfNotExist) * Set secretKey for accessId. 
* @param accessId * @param secretKey - * @return S3SecretValue pair + * @return {@code S3SecretValue } pair * @throws IOException */ public S3SecretValue setS3Secret(String accessId, String secretKey) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 216b51b8e86..1a40b536909 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -985,8 +985,23 @@ public OzoneDataStreamOutput createStreamFile(String keyName, long size, */ public List listStatus(String keyName, boolean recursive, String startKey, long numEntries) throws IOException { - return proxy - .listStatus(volumeName, name, keyName, recursive, startKey, numEntries); + return proxy.listStatus(volumeName, name, keyName, recursive, startKey, numEntries); + } + + /** + * List the lightweight status for a file or a directory and its contents. + * + * @param keyName Absolute path of the entry to be listed + * @param recursive For a directory if true all the descendants of a + * particular directory are listed + * @param startKey Key from which listing needs to start. If startKey exists + * its status is included in the final list. + * @param numEntries Number of entries to list from the start key + * @return list of file status + */ + public List listStatusLight(String keyName, boolean recursive, + String startKey, long numEntries) throws IOException { + return proxy.listStatusLight(volumeName, name, keyName, recursive, startKey, numEntries, false); } /** @@ -1046,6 +1061,37 @@ public void setTimes(String keyName, long mtime, long atime) proxy.setTimes(ozoneObj, keyName, mtime, atime); } + /** + * Gets the tags for an existing key. + * @param keyName Key name. + * @return Tags for the specified key. + * @throws IOException + */ + public Map getObjectTagging(String keyName) + throws IOException { + return proxy.getObjectTagging(volumeName, name, keyName); + } + + /** + * Sets the tags to an existing key. + * @param keyName Key name. + * @param tags Tags to set on the key. + * @throws IOException + */ + public void putObjectTagging(String keyName, Map tags) + throws IOException { + proxy.putObjectTagging(volumeName, name, keyName, tags); + } + + /** + * Removes all the tags from an existing key. + * @param keyName Key name + * @throws IOException + */ + public void deleteObjectTagging(String keyName) throws IOException { + proxy.deleteObjectTagging(volumeName, name, keyName); + } + public void setSourcePathExist(boolean b) { this.sourcePathExist = b; } @@ -1762,7 +1808,6 @@ private boolean getChildrenKeys(String keyPrefix, String startKey, // 1. Get immediate children of keyPrefix, starting with startKey List statuses = proxy.listStatusLight(volumeName, name, keyPrefix, false, startKey, listCacheSize, true); - boolean reachedLimitCacheSize = statuses.size() == listCacheSize; // 2. Special case: ListKey expects keyPrefix element should present in // the resultList, only if startKey is blank. If startKey is not blank @@ -1794,7 +1839,7 @@ private boolean getChildrenKeys(String keyPrefix, String startKey, // Return it so that the next iteration will be // started using the stacked items. return true; - } else if (reachedLimitCacheSize && indx == statuses.size() - 1) { + } else if (indx == statuses.size() - 1) { // The last element is a FILE and reaches the listCacheSize. 
// Now, sets next seek key to this element stack.push(new ImmutablePair<>(keyPrefix, keyInfo.getKeyName())); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java index 3a63a593469..8bd648545d4 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java @@ -26,6 +26,7 @@ import java.io.IOException; import com.google.common.annotations.VisibleForTesting; +import org.apache.ratis.util.UncheckedAutoCloseable; /** * OzoneClient connects to Ozone Cluster and @@ -76,6 +77,7 @@ public class OzoneClient implements Closeable { private final ClientProtocol proxy; private final ObjectStore objectStore; private ConfigurationSource conf; + private final UncheckedAutoCloseable leakTracker = OzoneClientFactory.track(this); /** * Creates a new OzoneClient object, generally constructed @@ -119,7 +121,11 @@ public ConfigurationSource getConfiguration() { */ @Override public void close() throws IOException { - proxy.close(); + try { + proxy.close(); + } finally { + leakTracker.close(); + } } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java index 2e9080a66f8..e2d87921b5c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.ozone.client; +import java.io.IOException; + /** * This exception is thrown by the Ozone Clients. 
*/ -public class OzoneClientException extends Exception { +public class OzoneClientException extends IOException { public OzoneClientException() { } @@ -36,8 +38,4 @@ public OzoneClientException(Throwable throwable) { super(throwable); } - public OzoneClientException(String s, Throwable throwable, boolean b, - boolean b1) { - super(s, throwable, b, b1); - } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index 44239aafceb..1c673618d07 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -23,9 +23,11 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.MutableConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.LeakDetector; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -34,13 +36,17 @@ import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.security.token.Token; -import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; +import org.apache.ratis.util.UncheckedAutoCloseable; + +import com.google.common.base.Preconditions; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; + /** * Factory class to create OzoneClients. */ @@ -54,6 +60,21 @@ public final class OzoneClientFactory { */ private OzoneClientFactory() { } + private static final LeakDetector OZONE_CLIENT_LEAK_DETECTOR = + new LeakDetector("OzoneClientObject"); + + public static UncheckedAutoCloseable track(AutoCloseable object) { + final Class clazz = object.getClass(); + final StackTraceElement[] stackTrace = HddsUtils.getStackTrace(LOG); + return OZONE_CLIENT_LEAK_DETECTOR.track(object, + () -> HddsUtils.reportLeak(clazz, + HddsUtils.formatStackTrace(stackTrace, 4), LOG)); + } + + public static Logger getLogger() { + return LOG; + } + /** * Constructs and return an OzoneClient with default configuration. @@ -170,7 +191,7 @@ private static OzoneClient getRpcClient(ClientProtocol clientProtocol, * Create OzoneClient for token renew/cancel operations. 
* @param conf Configuration to be used for OzoneCient creation * @param token ozone token is involved - * @return + * @return OzoneClient * @throws IOException */ public static OzoneClient getOzoneClient(Configuration conf, diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 9ab110aa2b5..e914b2db212 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -20,12 +20,13 @@ import java.io.IOException; import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import org.apache.commons.collections.ListUtils; import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -124,7 +125,7 @@ protected OzoneVolume(Builder builder) { this.creationTime.getEpochSecond(), this.creationTime.getNano()); } } - this.acls = builder.acls; + this.acls = new ArrayList<>(builder.acls); if (builder.conf != null) { this.listCacheSize = HddsClientUtils.getListCacheSize(builder.conf); } @@ -203,7 +204,7 @@ public Instant getModificationTime() { * @return aclMap */ public List getAcls() { - return ListUtils.unmodifiableList(acls); + return Collections.unmodifiableList(acls); } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java index eed7a6829c9..9a0083b0210 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java @@ -59,14 +59,13 @@ public boolean getForceCreationWhenVolumeExists() { * * @return Builder */ - public static TenantArgs.Builder newBuilder() { - return new TenantArgs.Builder(); + public static Builder newBuilder() { + return new Builder(); } /** * Builder for TenantArgs. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private String volumeName; private boolean forceCreationWhenVolumeExists; @@ -77,12 +76,12 @@ public static class Builder { public Builder() { } - public TenantArgs.Builder setVolumeName(String volumeName) { + public Builder setVolumeName(String volumeName) { this.volumeName = volumeName; return this; } - public TenantArgs.Builder setForceCreationWhenVolumeExists( + public Builder setForceCreationWhenVolumeExists( boolean forceCreationWhenVolumeExists) { this.forceCreationWhenVolumeExists = forceCreationWhenVolumeExists; return this; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java index 2d83d88ed5e..76baefd71dd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java @@ -20,6 +20,8 @@ import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.XceiverClientFactory; @@ -38,6 +40,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.List; /** @@ -150,6 +153,90 @@ protected void setChecksumType(ContainerProtos.ChecksumType type) { checksumType = type; } + protected abstract AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength); + + protected abstract String populateBlockChecksumBuf(ByteBuffer blockChecksumByteBuffer) throws IOException; + + protected abstract List getChunkInfos( + OmKeyLocationInfo keyLocationInfo) throws IOException; + + protected ByteBuffer getBlockChecksumFromChunkChecksums(AbstractBlockChecksumComputer blockChecksumComputer) + throws IOException { + blockChecksumComputer.compute(getCombineMode()); + return blockChecksumComputer.getOutByteBuffer(); + } + + /** + * Compute block checksums block by block and append the raw bytes of the + * block checksums into getBlockChecksumBuf(). + * + * @throws IOException + */ + protected void checksumBlocks() throws IOException { + long currentLength = 0; + for (int blockIdx = 0; + blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; + blockIdx++) { + OmKeyLocationInfo keyLocationInfo = + getKeyLocationInfoList().get(blockIdx); + if (currentLength > getLength()) { + return; + } + + if (!checksumBlock(keyLocationInfo)) { + throw new PathIOException(getSrc(), + "Fail to get block checksum for " + keyLocationInfo + + ", checksum combine mode: " + getCombineMode()); + } + + currentLength += keyLocationInfo.getLength(); + } + } + + /** + * Return true when sounds good to continue or retry, false when severe + * condition or totally failed. 
+ */ + protected boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) + throws IOException { + // for each block, send request + List chunkInfos = + getChunkInfos(keyLocationInfo); + if (chunkInfos.isEmpty()) { + return false; + } + + long blockNumBytes = keyLocationInfo.getLength(); + + if (getRemaining() < blockNumBytes) { + blockNumBytes = getRemaining(); + } + setRemaining(getRemaining() - blockNumBytes); + + ContainerProtos.ChecksumData checksumData = + chunkInfos.get(0).getChecksumData(); + setChecksumType(checksumData.getType()); + int bytesPerChecksum = checksumData.getBytesPerChecksum(); + setBytesPerCRC(bytesPerChecksum); + + AbstractBlockChecksumComputer blockChecksumComputer = getBlockChecksumComputer(chunkInfos, + keyLocationInfo.getLength()); + ByteBuffer blockChecksumByteBuffer = + getBlockChecksumFromChunkChecksums(blockChecksumComputer); + String blockChecksumForDebug = + populateBlockChecksumBuf(blockChecksumByteBuffer); + + LOG.debug("Got reply from {} {} for block {}: blockChecksum={}, " + + "blockChecksumType={}", + keyInfo.getReplicationConfig().getReplicationType() == HddsProtos.ReplicationType.EC + ? "EC pipeline" : "pipeline", + keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), + blockChecksumForDebug, checksumData.getType()); + + return true; + } + /** * Request the blocks created in the most recent version from Ozone Manager. * @@ -219,14 +306,6 @@ public void compute() throws IOException { } } - /** - * Compute block checksums block by block and append the raw bytes of the - * block checksums into getBlockChecksumBuf(). - * - * @throws IOException - */ - protected abstract void checksumBlocks() throws IOException; - /** * Make final file checksum result given the per-block or per-block-group * checksums collected into getBlockChecksumBuf(). 
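Editor's note between the two checksum-helper diffs: the BaseFileChecksumHelper hunk above pulls the per-block loop (checksumBlocks()/checksumBlock()) up into the base class, leaving the EC and replicated helpers to supply only the block-checksum computer, the chunk-info lookup, and the checksum-buffer population. The following is a minimal, self-contained sketch of that template-method shape; every class, method, and type name here is a simplified stand-in for illustration, not the real Ozone API.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical stand-in for the real helper hierarchy; illustration only.
abstract class FileChecksumTemplateSketch {

  private long remaining;
  private final List<ByteBuffer> blockChecksums = new ArrayList<>();

  FileChecksumTemplateSketch(long length) {
    this.remaining = length;
  }

  // The hooks a concrete helper (EC or replicated) would supply.
  protected abstract List<ByteBuffer> fetchChunkChecksums(long blockLength) throws IOException;

  protected abstract ByteBuffer combine(List<ByteBuffer> chunkChecksums, long blockLength) throws IOException;

  // Shared per-block loop, analogous to the loop now living in the base class.
  final List<ByteBuffer> checksumBlocks(long[] blockLengths) throws IOException {
    for (long blockLength : blockLengths) {
      if (remaining <= 0) {
        break;
      }
      List<ByteBuffer> chunkChecksums = fetchChunkChecksums(blockLength);
      if (chunkChecksums.isEmpty()) {
        throw new IOException("no chunk checksums for block of length " + blockLength);
      }
      remaining -= Math.min(remaining, blockLength);
      blockChecksums.add(combine(chunkChecksums, blockLength));
    }
    return blockChecksums;
  }

  public static void main(String[] args) throws IOException {
    FileChecksumTemplateSketch sketch = new FileChecksumTemplateSketch(300) {
      @Override
      protected List<ByteBuffer> fetchChunkChecksums(long blockLength) {
        // Pretend every block reports a single 4-byte chunk checksum.
        return Arrays.asList(ByteBuffer.allocate(4));
      }

      @Override
      protected ByteBuffer combine(List<ByteBuffer> chunkChecksums, long blockLength) {
        // Trivial "combination" for the demo: take the first chunk checksum as-is.
        return chunkChecksums.get(0);
      }
    };
    System.out.println("blocks checksummed: " + sketch.checksumBlocks(new long[] {128, 128, 128}).size());
  }
}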
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java index b2c30ed9e08..a4c24768cdd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java @@ -45,12 +45,14 @@ public class ECBlockChecksumComputer extends AbstractBlockChecksumComputer { private final List chunkInfoList; private final OmKeyInfo keyInfo; + private final long blockLength; public ECBlockChecksumComputer( - List chunkInfoList, OmKeyInfo keyInfo) { + List chunkInfoList, OmKeyInfo keyInfo, long blockLength) { this.chunkInfoList = chunkInfoList; this.keyInfo = keyInfo; + this.blockLength = blockLength; } @Override @@ -72,15 +74,13 @@ public void compute(OzoneClientConfig.ChecksumCombineMode combineMode) private void computeMd5Crc() { Preconditions.checkArgument(chunkInfoList.size() > 0); - final ContainerProtos.ChunkInfo firstChunkInfo = chunkInfoList.get(0); - long chunkSize = firstChunkInfo.getLen(); - long bytesPerCrc = firstChunkInfo.getChecksumData().getBytesPerChecksum(); - // Total parity checksum bytes per stripe to remove - int parityBytes = getParityBytes(chunkSize, bytesPerCrc); - final MessageDigest digester = MD5Hash.getDigester(); for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { + long chunkSize = chunkInfo.getLen(); + long bytesPerCrc = chunkInfo.getChecksumData().getBytesPerChecksum(); + // Total parity checksum bytes per stripe to remove + int parityBytes = getParityBytes(chunkSize, bytesPerCrc); ByteString stripeChecksum = chunkInfo.getStripeChecksum(); Preconditions.checkNotNull(stripeChecksum); @@ -121,66 +121,40 @@ private void computeCompositeCrc() throws IOException { // Bytes required to create a CRC long bytesPerCrc = firstChunkInfo.getChecksumData().getBytesPerChecksum(); - long chunkSize = firstChunkInfo.getLen(); - - //When EC chunk size is not a multiple of ozone.client.bytes.per.checksum - // (default = 16KB) the last checksum in an EC chunk is only generated for - // offset. - long bytesPerCrcOffset = chunkSize % bytesPerCrc; - - long keySize = keyInfo.getDataSize(); - // Total parity checksum bytes per stripe to remove - int parityBytes = getParityBytes(chunkSize, bytesPerCrc); - - // Number of checksum per chunk, Eg: 2MB EC chunk will - // have 2 checksum per chunk. 
- int numChecksumPerChunk = (int) - (Math.ceil((double) chunkSize / bytesPerCrc)); + long blockSize = blockLength; CrcComposer blockCrcComposer = CrcComposer.newCrcComposer(dataChecksumType, bytesPerCrc); for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { ByteString stripeChecksum = chunkInfo.getStripeChecksum(); + long chunkSize = chunkInfo.getLen(); + + // Total parity checksum bytes per stripe to remove + int parityBytes = getParityBytes(chunkSize, bytesPerCrc); Preconditions.checkNotNull(stripeChecksum); final int checksumSize = stripeChecksum.size(); Preconditions.checkArgument(checksumSize % 4 == 0, "Checksum Bytes size does not match"); - CrcComposer chunkCrcComposer = - CrcComposer.newCrcComposer(dataChecksumType, bytesPerCrc); // Limit parity bytes as they do not contribute to fileChecksum final ByteBuffer byteWrap = stripeChecksum.asReadOnlyByteBuffer(); byteWrap.limit(checksumSize - parityBytes); - long chunkOffsetIndex = 1; while (byteWrap.hasRemaining()) { - - /* - When chunk size is not a multiple of bytes.per.crc we get an offset. - For eg, RS-3-2-1524k is not a multiple of 1MB. So two checksums are - generated 1st checksum for 1024k bytes and 2nd checksum for 500k bytes. - When we reach the 2nd Checksum we need to modify the bytesPerCrc as in - this case 500k is the bytes for which the checksum is generated. - */ - long currentChunkOffset = Long.MAX_VALUE; - if ((chunkOffsetIndex % numChecksumPerChunk == 0) - && (bytesPerCrcOffset > 0)) { - currentChunkOffset = bytesPerCrcOffset; + // Here Math.min in mainly required for last stripe's last chunk. The last chunk of the last stripe can be + // less than the chunkSize, chunkSize is only calculated from each stripe's first chunk. This would be fine + // for rest of the stripe because all the chunks are of the same size. But for the last stripe we don't know + // the exact size of the last chunk. So we calculate it with the of blockSize. If the block size is smaller + // than the chunk size, then we know it is the last stripe' last chunk. + long remainingChunkSize = Math.min(blockSize, chunkSize); + while (byteWrap.hasRemaining() && remainingChunkSize > 0) { + final int checksumData = byteWrap.getInt(); + blockCrcComposer.update(checksumData, Math.min(bytesPerCrc, remainingChunkSize)); + remainingChunkSize -= bytesPerCrc; } - - final int checksumDataCrc = byteWrap.getInt(); - //To handle last chunk when it size is lower than 1524K in the case - // of rs-3-2-1524k. 
- long chunkSizePerChecksum = Math.min(Math.min(keySize, bytesPerCrc), - currentChunkOffset); - chunkCrcComposer.update(checksumDataCrc, chunkSizePerChecksum); - - int chunkChecksumCrc = CrcUtil.readInt(chunkCrcComposer.digest(), 0); - blockCrcComposer.update(chunkChecksumCrc, chunkSizePerChecksum); - keySize -= Math.min(bytesPerCrc, currentChunkOffset); - ++chunkOffsetIndex; + blockSize -= chunkSize; } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java index 13ba5716987..db36b9837ad 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.client.checksum; -import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -46,7 +45,6 @@ * The helper class to compute file checksum for EC files. */ public class ECFileChecksumHelper extends BaseFileChecksumHelper { - private int blockIdx; public ECFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, String keyName, long length, OzoneClientConfig.ChecksumCombineMode @@ -57,63 +55,13 @@ public ECFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, } @Override - protected void checksumBlocks() throws IOException { - long currentLength = 0; - for (blockIdx = 0; - blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; - blockIdx++) { - OmKeyLocationInfo keyLocationInfo = - getKeyLocationInfoList().get(blockIdx); - - if (currentLength > getLength()) { - return; - } - - if (!checksumBlock(keyLocationInfo)) { - throw new PathIOException(getSrc(), - "Fail to get block checksum for " + keyLocationInfo - + ", checksum combine mode: " + getCombineMode()); - } - - currentLength += keyLocationInfo.getLength(); - } - } - - private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) - throws IOException { - // for each block, send request - List chunkInfos = - getChunkInfos(keyLocationInfo); - if (chunkInfos.size() == 0) { - return false; - } - - long blockNumBytes = keyLocationInfo.getLength(); - - if (getRemaining() < blockNumBytes) { - blockNumBytes = getRemaining(); - } - setRemaining(getRemaining() - blockNumBytes); - - ContainerProtos.ChecksumData checksumData = - chunkInfos.get(0).getChecksumData(); - setChecksumType(checksumData.getType()); - int bytesPerChecksum = checksumData.getBytesPerChecksum(); - setBytesPerCRC(bytesPerChecksum); - - ByteBuffer blockChecksumByteBuffer = - getBlockChecksumFromChunkChecksums(chunkInfos); - String blockChecksumForDebug = - populateBlockChecksumBuf(blockChecksumByteBuffer); - - LOG.debug("Got reply from EC pipeline {} for block {}: blockChecksum={}, " + - "blockChecksumType={}", - keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), - blockChecksumForDebug, checksumData.getType()); - return true; + protected AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength) { + return new ECBlockChecksumComputer(chunkInfos, getKeyInfo(), blockLength); } - private String populateBlockChecksumBuf( + @Override + protected String populateBlockChecksumBuf( ByteBuffer blockChecksumByteBuffer) throws IOException { String 
blockChecksumForDebug = null; switch (getCombineMode()) { @@ -139,18 +87,9 @@ private String populateBlockChecksumBuf( return blockChecksumForDebug; } - private ByteBuffer getBlockChecksumFromChunkChecksums( - List chunkInfos) throws IOException { - - AbstractBlockChecksumComputer blockChecksumComputer = - new ECBlockChecksumComputer(chunkInfos, getKeyInfo()); - blockChecksumComputer.compute(getCombineMode()); - - return blockChecksumComputer.getOutByteBuffer(); - } - - private List getChunkInfos(OmKeyLocationInfo - keyLocationInfo) throws IOException { + @Override + protected List getChunkInfos(OmKeyLocationInfo + keyLocationInfo) throws IOException { // To read an EC block, we create a STANDALONE pipeline that contains the // single location for the block index we want to read. The EC blocks are // indexed from 1 to N, however the data locations are stored in the diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java index 016121ce1a9..9c2df0fdb47 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.client.checksum; -import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -44,7 +43,6 @@ * The helper class to compute file checksum for replicated files. */ public class ReplicatedFileChecksumHelper extends BaseFileChecksumHelper { - private int blockIdx; public ReplicatedFileChecksumHelper( OzoneVolume volume, OzoneBucket bucket, String keyName, long length, @@ -61,65 +59,10 @@ public ReplicatedFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, keyInfo); } - @Override - protected void checksumBlocks() throws IOException { - long currentLength = 0; - for (blockIdx = 0; - blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; - blockIdx++) { - OmKeyLocationInfo keyLocationInfo = - getKeyLocationInfoList().get(blockIdx); - if (currentLength > getLength()) { - return; - } - - if (!checksumBlock(keyLocationInfo)) { - throw new PathIOException(getSrc(), - "Fail to get block checksum for " + keyLocationInfo - + ", checksum combine mode: " + getCombineMode()); - } - - currentLength += keyLocationInfo.getLength(); - } - } - - /** - * Return true when sounds good to continue or retry, false when severe - * condition or totally failed. 
- */ - private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) - throws IOException { - // for each block, send request - List chunkInfos = - getChunkInfos(keyLocationInfo); - if (chunkInfos.size() == 0) { - return false; - } - - long blockNumBytes = keyLocationInfo.getLength(); - - if (getRemaining() < blockNumBytes) { - blockNumBytes = getRemaining(); - } - setRemaining(getRemaining() - blockNumBytes); - - ContainerProtos.ChecksumData checksumData = - chunkInfos.get(0).getChecksumData(); - setChecksumType(checksumData.getType()); - int bytesPerChecksum = checksumData.getBytesPerChecksum(); - setBytesPerCRC(bytesPerChecksum); - - ByteBuffer blockChecksumByteBuffer = getBlockChecksumFromChunkChecksums( - keyLocationInfo, chunkInfos); - String blockChecksumForDebug = - populateBlockChecksumBuf(blockChecksumByteBuffer); - - LOG.debug("got reply from pipeline {} for block {}: blockChecksum={}, " + - "blockChecksumType={}", - keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), - blockChecksumForDebug, checksumData.getType()); - return true; + protected AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength) { + return new ReplicatedBlockChecksumComputer(chunkInfos); } // copied from BlockInputStream @@ -127,6 +70,7 @@ private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) * Send RPC call to get the block info from the container. * @return List of chunks in this block. */ + @Override protected List getChunkInfos( OmKeyLocationInfo keyLocationInfo) throws IOException { // irrespective of the container state, we will always read via Standalone @@ -164,18 +108,6 @@ protected List getChunkInfos( return chunks; } - // TODO: copy BlockChecksumHelper here - ByteBuffer getBlockChecksumFromChunkChecksums( - OmKeyLocationInfo keyLocationInfo, - List chunkInfoList) - throws IOException { - AbstractBlockChecksumComputer blockChecksumComputer = - new ReplicatedBlockChecksumComputer(chunkInfoList); - blockChecksumComputer.compute(getCombineMode()); - - return blockChecksumComputer.getOutByteBuffer(); - } - /** * Parses out the raw blockChecksum bytes from {@code checksumData} byte * buffer according to the blockChecksumType and populates the cumulative @@ -184,7 +116,8 @@ ByteBuffer getBlockChecksumFromChunkChecksums( * @return a debug-string representation of the parsed checksum if * debug is enabled, otherwise null. 
*/ - String populateBlockChecksumBuf(ByteBuffer checksumData) + @Override + protected String populateBlockChecksumBuf(ByteBuffer checksumData) throws IOException { String blockChecksumForDebug = null; switch (getCombineMode()) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java index 4f9e5db49a9..b5f8191d368 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java @@ -303,7 +303,7 @@ private int writeToOutputStream(BlockOutputStreamEntry current, if (retry) { current.writeOnRetry(len); } else { - waitForRetryHandling(current); + current.waitForRetryHandling(retryHandlingCondition); current.write(b, off, writeLen); offset += writeLen; } @@ -584,7 +584,7 @@ private void handleFlushOrClose(StreamAction op) throws IOException { blockOutputStreamEntryPool.getCurrentStreamEntry(); if (entry != null) { // If the current block is to handle retries, wait until all the retries are done. - waitForRetryHandling(entry); + doInWriteLock(() -> entry.waitForRetryHandling(retryHandlingCondition)); entry.registerCallReceived(); try { handleStreamAction(entry, op); @@ -608,10 +608,6 @@ private void handleFlushOrClose(StreamAction op) throws IOException { } } - private void waitForRetryHandling(BlockOutputStreamEntry currentEntry) throws InterruptedException { - doInWriteLock(() -> currentEntry.waitForRetryHandling(retryHandlingCondition)); - } - private void handleStreamAction(BlockOutputStreamEntry entry, StreamAction op) throws IOException { Collection failedServers = entry.getFailedServers(); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java index 36031a9cf4d..dc85fffe1ca 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStreamSemaphore.java @@ -32,7 +32,7 @@ public class KeyOutputStreamSemaphore { private final Semaphore requestSemaphore; KeyOutputStreamSemaphore(int maxConcurrentWritePerKey) { - LOG.info("Initializing semaphore with maxConcurrentWritePerKey = {}", maxConcurrentWritePerKey); + LOG.debug("Initializing semaphore with maxConcurrentWritePerKey = {}", maxConcurrentWritePerKey); if (maxConcurrentWritePerKey > 0) { requestSemaphore = new Semaphore(maxConcurrentWritePerKey); } else if (maxConcurrentWritePerKey == 0) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 16211ebbb8e..c0bffaf8950 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -59,7 +59,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; 
import org.apache.hadoop.ozone.om.helpers.TenantStateList; @@ -514,39 +513,6 @@ List listKeys(String volumeName, String bucketName, String keyPrefix, String prevKey, int maxListResult) throws IOException; - /** - * List trash allows the user to list the keys that were marked as deleted, - * but not actually deleted by Ozone Manager. This allows a user to recover - * keys within a configurable window. - * @param volumeName - The volume name, which can also be a wild card - * using '*'. - * @param bucketName - The bucket name, which can also be a wild card - * using '*'. - * @param startKeyName - List keys from a specific key name. - * @param keyPrefix - List keys using a specific prefix. - * @param maxKeys - The number of keys to be returned. This must be below - * the cluster level set by admins. - * @return The list of keys that are deleted from the deleted table. - * @throws IOException - */ - List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, - int maxKeys) - throws IOException; - - /** - * Recover trash allows the user to recover keys that were marked as deleted, - * but not actually deleted by Ozone Manager. - * @param volumeName - The volume name. - * @param bucketName - The bucket name. - * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. - * @return The result of recovering operation is success or not. - * @throws IOException - */ - boolean recoverTrash(String volumeName, String bucketName, String keyName, - String destinationBucket) throws IOException; - /** * Get OzoneKey. * @param volumeName Name of the Volume @@ -1345,4 +1311,38 @@ void setTimes(OzoneObj obj, String keyName, long mtime, long atime) * @throws IOException */ void recoverKey(OmKeyArgs args, long clientID) throws IOException; + + /** + * Gets the tags for an existing key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. + * @return tags for the specified key. + * @throws IOException + */ + Map getObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException; + + /** + * Sets the tags to an existing key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. + * @param tags Tags to set on the key. + * @throws IOException + */ + void putObjectTagging(String volumeName, String bucketName, String keyName, + Map tags) throws IOException; + + + /** + * Removes all the tags from the specified key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. 
+ * @throws IOException + */ + void deleteObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException; + } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index bfeb9c1e6c1..93c675d9b90 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -34,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; @@ -120,10 +121,8 @@ import org.apache.hadoop.ozone.om.helpers.OmTenantArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -141,9 +140,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.security.GDPRSymmetricKey; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.ListSnapshotResponse; @@ -161,7 +157,6 @@ import java.security.InvalidKeyException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -176,7 +171,7 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneAcl.LINK_BUCKET_DEFAULT_ACL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_KEY; @@ -185,8 +180,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD; import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; /** * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode @@ -210,8 +203,6 @@ public class RpcClient implements ClientProtocol { private final XceiverClientFactory xceiverClientManager; private final 
UserGroupInformation ugi; private UserGroupInformation s3gUgi; - private final ACLType userRights; - private final ACLType groupRights; private final ClientId clientId = ClientId.randomId(); private final boolean unsafeByteBufferConversion; private Text dtService; @@ -244,12 +235,8 @@ public RpcClient(ConfigurationSource conf, String omServiceId) Preconditions.checkNotNull(conf); this.conf = conf; this.ugi = UserGroupInformation.getCurrentUser(); - // Get default acl rights for user and group. - OzoneAclConfig aclConfig = this.conf.getObject(OzoneAclConfig.class); replicationConfigValidator = this.conf.getObject(ReplicationConfigValidator.class); - this.userRights = aclConfig.getUserDefaultRights(); - this.groupRights = aclConfig.getGroupDefaultRights(); this.clientConfig = conf.getObject(OzoneClientConfig.class); this.ecReconstructExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( @@ -450,20 +437,6 @@ public void createVolume(String volumeName, VolumeArgs volArgs) ugi.getShortUserName() : volArgs.getOwner(); long quotaInNamespace = volArgs.getQuotaInNamespace(); long quotaInBytes = volArgs.getQuotaInBytes(); - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - owner, ACCESS, userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(owner).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, ACCESS, groupRights))); - //ACLs from VolumeArgs - List volumeAcls = volArgs.getAcls(); - if (volumeAcls != null) { - listOfAcls.addAll(volumeAcls); - } OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); builder.setVolume(volumeName); @@ -473,11 +446,14 @@ public void createVolume(String volumeName, VolumeArgs volArgs) builder.setQuotaInNamespace(quotaInNamespace); builder.setUsedNamespace(0L); builder.addAllMetadata(volArgs.getMetadata()); - - //Remove duplicates and add ACLs - for (OzoneAcl ozoneAcl : - listOfAcls.stream().distinct().collect(Collectors.toList())) { - builder.addOzoneAcls(ozoneAcl); + //ACLs from VolumeArgs + List volumeAcls = volArgs.getAcls(); + if (volumeAcls != null) { + //Remove duplicates and add ACLs + for (OzoneAcl ozoneAcl : + volumeAcls.stream().distinct().collect(Collectors.toList())) { + builder.addOzoneAcls(ozoneAcl); + } } if (volArgs.getQuotaInBytes() == 0) { @@ -667,17 +643,6 @@ public void createBucket( .setKeyName(bucketArgs.getEncryptionKey()).build(); } - List listOfAcls = getAclList(); - //ACLs from BucketArgs - if (bucketArgs.getAcls() != null) { - listOfAcls.addAll(bucketArgs.getAcls()); - } - // Link bucket default acl - if (bucketArgs.getSourceVolume() != null - && bucketArgs.getSourceBucket() != null) { - listOfAcls.add(linkBucketDefaultAcl()); - } - OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) @@ -688,10 +653,19 @@ public void createBucket( .setSourceBucket(bucketArgs.getSourceBucket()) .setQuotaInBytes(bucketArgs.getQuotaInBytes()) .setQuotaInNamespace(bucketArgs.getQuotaInNamespace()) - .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())) .setBucketLayout(bucketLayout) .setOwner(owner); + if (bucketArgs.getAcls() != null) { + builder.setAcls(bucketArgs.getAcls()); + } + + // Link bucket default acl + if (bucketArgs.getSourceVolume() != null + && bucketArgs.getSourceBucket() != null) { + builder.addAcl(LINK_BUCKET_DEFAULT_ACL); + } + if (bek != null) { 
builder.setBucketEncryptionKey(bek); } @@ -752,17 +726,6 @@ private static void verifySpaceQuota(long quota) throws OMException { } } - /** - * Helper function to get default acl list for current user. - * - * @return listOfAcls - * */ - private List getAclList() { - UserGroupInformation realUserInfo = getRealUserInfo(); - return OzoneAclUtil.getAclList(realUserInfo.getUserName(), - realUserInfo.getGroupNames(), userRights, groupRights); - } - /** * Helper function to get the actual operating user. * @@ -778,16 +741,6 @@ private UserGroupInformation getRealUserInfo() { return ugi; } - /** - * Link bucket default acl defined [world::rw] - * which is similar to Linux POSIX symbolic. - * - * @return OzoneAcl - */ - private OzoneAcl linkBucketDefaultAcl() { - return new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, READ, WRITE); - } - /** * Get a valid Delegation Token. * @@ -1427,7 +1380,6 @@ public OzoneOutputStream createKey( .setReplicationConfig(replicationConfig) .addAllMetadataGdpr(metadata) .addAllTags(tags) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setOwnerName(ownerName); @@ -1536,7 +1488,6 @@ public OzoneDataStreamOutput createStreamKey( .addAllMetadataGdpr(metadata) .addAllTags(tags) .setSortDatanodesInPipeline(true) - .setAcls(getAclList()) .setOwnerName(ownerName); OpenKeySession openKey = ozoneManagerClient.openKey(builder.build()); @@ -1771,25 +1722,6 @@ public List listKeys(String volumeName, String bucketName, } } - @Override - public List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException { - - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - - return ozoneManagerClient.listTrash(volumeName, bucketName, startKeyName, - keyPrefix, maxKeys); - } - - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - - return ozoneManagerClient.recoverTrash(volumeName, bucketName, keyName, - destinationBucket); - } - @Override public OzoneKeyDetails getKeyDetails( String volumeName, String bucketName, String keyName) @@ -1974,7 +1906,6 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, .setBucketName(bucketName) .setKeyName(keyName) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .addAllMetadataGdpr(metadata) .setOwnerName(ownerName) .addAllTags(tags) @@ -2011,7 +1942,6 @@ private OpenKeySession newMultipartOpenKey( .setMultipartUploadID(uploadID) .setMultipartUploadPartNumber(partNumber) .setSortDatanodesInPipeline(sortDatanodesInPipeline) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); return ozoneManagerClient.openKey(keyArgs); @@ -2083,7 +2013,6 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setBucketName(bucketName) .setKeyName(keyName) .setMultipartUploadID(uploadID) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); @@ -2184,13 +2113,10 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, @Override public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { - verifyVolumeName(volumeName); - verifyBucketName(bucketName); String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); ozoneManagerClient.createDirectory(keyArgs); @@ -2271,7 +2197,6 @@ public 
OzoneOutputStream createFile(String volumeName, String bucketName, .setKeyName(keyName) .setDataSize(size) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setOwnerName(ownerName) .build(); @@ -2303,7 +2228,6 @@ public OzoneDataStreamOutput createStreamFile(String volumeName, .setKeyName(keyName) .setDataSize(size) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setSortDatanodesInPipeline(true) .setOwnerName(ownerName) @@ -2337,9 +2261,16 @@ public List listStatusLight(String volumeName, String bucketName, String keyName, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) throws IOException { OmKeyArgs keyArgs = prepareOmKeyArgs(volumeName, bucketName, keyName); - return ozoneManagerClient - .listStatusLight(keyArgs, recursive, startKey, numEntries, - allowPartialPrefixes); + if (omVersion.compareTo(OzoneManagerVersion.LIGHTWEIGHT_LIST_STATUS) >= 0) { + return ozoneManagerClient.listStatusLight(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); + } else { + return ozoneManagerClient.listStatus(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes) + .stream() + .map(OzoneFileStatusLight::fromOzoneFileStatus) + .collect(Collectors.toList()); + } } /** @@ -2511,9 +2442,7 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey) private OzoneOutputStream createOutputStream(OpenKeySession openKey, KeyOutputStream keyOutputStream) throws IOException { - boolean enableHsync = conf.getBoolean( - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT); + boolean enableHsync = OzoneFSUtils.canEnableHsync(conf, true); keyOutputStream .addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(), openKey.getOpenVersion()); @@ -2525,9 +2454,7 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey, private OzoneOutputStream createSecureOutputStream(OpenKeySession openKey, OutputStream keyOutputStream, Syncable syncable) throws IOException { - boolean enableHsync = conf.getBoolean( - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT); + boolean enableHsync = OzoneFSUtils.canEnableHsync(conf, true); final FileEncryptionInfo feInfo = openKey.getKeyInfo().getFileEncryptionInfo(); if (feInfo != null) { @@ -2608,17 +2535,27 @@ public OzoneFsServerDefaults getServerDefaults() throws IOException { long now = Time.monotonicNow(); if ((serverDefaults == null) || (now - serverDefaultsLastUpdate > serverDefaultsValidityPeriod)) { - serverDefaults = ozoneManagerClient.getServerDefaults(); - serverDefaultsLastUpdate = now; + try { + for (ServiceInfo si : ozoneManagerClient.getServiceInfo() + .getServiceInfoList()) { + if (si.getServerDefaults() != null) { + serverDefaults = si.getServerDefaults(); + serverDefaultsLastUpdate = now; + break; + } + } + } catch (Exception e) { + LOG.warn("Could not get server defaults from OM.", e); + } } - assert serverDefaults != null; return serverDefaults; } @Override public URI getKeyProviderUri() throws IOException { - return OzoneKMSUtil.getKeyProviderUri(ugi, - null, getServerDefaults().getKeyProviderUri(), conf); + String keyProviderUri = (getServerDefaults() != null) ? 
+ serverDefaults.getKeyProviderUri() : null; + return OzoneKMSUtil.getKeyProviderUri(ugi, null, keyProviderUri, conf); } @Override @@ -2735,6 +2672,61 @@ public void recoverKey(OmKeyArgs args, long clientID) throws IOException { ozoneManagerClient.recoverKey(args, clientID); } + @Override + public Map getObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .build(); + return ozoneManagerClient.getObjectTagging(keyArgs); + } + + @Override + public void putObjectTagging(String volumeName, String bucketName, + String keyName, Map tags) throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .addAllTags(tags) + .build(); + ozoneManagerClient.putObjectTagging(keyArgs); + } + + @Override + public void deleteObjectTagging(String volumeName, String bucketName, + String keyName) throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .build(); + ozoneManagerClient.deleteObjectTagging(keyArgs); + } + private static ExecutorService createThreadPoolExecutor( int corePoolSize, int maximumPoolSize, String threadNameFormat) { return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java index 5f2b80bdef6..0db67441fb5 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java @@ -71,8 +71,14 @@ private void init(boolean incrementalChunkList) throws IOException { ((InMemoryConfiguration)config).setFromObject(clientConfig); + ((InMemoryConfiguration) config).setBoolean( + OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + ((InMemoryConfiguration) config).setBoolean( + "ozone.client.hbase.enhancements.allowed", true); ((InMemoryConfiguration) config).setBoolean( OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + ((InMemoryConfiguration) config).setInt( + "ozone.client.bytes.per.checksum", 8192); RpcClient rpcClient = new RpcClient(config, null) { diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java index 09a6c0a5c0e..e03fa461cc6 
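A hedged usage sketch of the three tagging calls added to ClientProtocol and implemented in RpcClient above; the volume, bucket, key and tag values are placeholders, and the tag map is assumed to be String-to-String as suggested by the new javadoc.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;

/** Illustrative only: round-trips a tag set through the new ClientProtocol calls. */
public final class TagRoundTrip {
  static void run(ClientProtocol client) throws IOException {
    Map<String, String> tags = new HashMap<>();
    tags.put("project", "ozone");                              // sample tag, not from the patch

    client.putObjectTagging("vol1", "bucket1", "key1", tags);  // set tags on an existing key
    Map<String, String> current =
        client.getObjectTagging("vol1", "bucket1", "key1");    // read them back
    current.forEach((k, v) -> System.out.println(k + "=" + v));
    client.deleteObjectTagging("vol1", "bucket1", "key1");     // remove all tags from the key
  }
}

Per the version guards added above, each call fails with an IOException when the OzoneManager predates S3_OBJECT_TAGGING_API.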
100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java @@ -117,7 +117,7 @@ public void testDeleteVolume() @Test public void testCreateVolumeWithMetadata() - throws IOException, OzoneClientException { + throws IOException { String volumeName = UUID.randomUUID().toString(); VolumeArgs volumeArgs = VolumeArgs.newBuilder() .addMetadata("key1", "val1") diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java index 25a3ad2d9c8..1b67f024bbe 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java @@ -153,7 +153,7 @@ public void testPutECKeyAndCheckDNStoredData() throws IOException { Map storages = factoryStub.getStorages(); DatanodeDetails[] dnDetails = - storages.keySet().toArray(new DatanodeDetails[storages.size()]); + storages.keySet().toArray(new DatanodeDetails[0]); Arrays.sort(dnDetails); for (int i = 0; i < inputChunks.length; i++) { MockDatanodeStorage datanodeStorage = storages.get(dnDetails[i]); @@ -182,7 +182,7 @@ public void testPutECKeyAndCheckParityData() throws IOException { Map storages = factoryStub.getStorages(); DatanodeDetails[] dnDetails = - storages.keySet().toArray(new DatanodeDetails[storages.size()]); + storages.keySet().toArray(new DatanodeDetails[0]); Arrays.sort(dnDetails); for (int i = dataBlocks; i < parityBlocks + dataBlocks; i++) { diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java similarity index 74% rename from hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java rename to hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java index 702a450ee75..83feb378c56 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java @@ -21,7 +21,9 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.InMemoryConfiguration; @@ -56,10 +58,11 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -77,9 +80,10 @@ import static org.mockito.Mockito.mock; /** - * Unit tests for ReplicatedFileChecksumHelper class. + * Unit tests for Replicated and EC FileChecksumHelper class. 
*/ -public class TestReplicatedFileChecksumHelper { +public class TestFileChecksumHelper { + private final FileChecksum noCachedChecksum = null; private OzoneClient client; private ObjectStore store; private OzoneVolume volume; @@ -119,128 +123,126 @@ public void close() throws IOException { client.close(); } + private OmKeyInfo omKeyInfo(ReplicationType type, FileChecksum cachedChecksum, List locationInfo) { + ReplicationConfig config = type == ReplicationType.EC ? new ECReplicationConfig(6, 3) + : RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE); - @Test - public void testEmptyBlock() throws IOException { - // test the file checksum of a file with an empty block. - RpcClient mockRpcClient = mock(RpcClient.class); - - OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); - when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + return new OmKeyInfo.Builder() .setVolumeName(null) .setBucketName(null) .setKeyName(null) .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) + new OmKeyLocationInfoGroup(0, locationInfo))) .setCreationTime(Time.now()) .setModificationTime(Time.now()) .setDataSize(0) - .setReplicationConfig(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) + .setReplicationConfig(config) .setFileEncryptionInfo(null) + .setFileChecksum(cachedChecksum) .setAcls(null) .build(); + } - when(om.lookupKey(any())).thenReturn(omKeyInfo); + private BaseFileChecksumHelper checksumHelper(ReplicationType type, OzoneVolume mockVolume, OzoneBucket mockBucket, + int length, OzoneClientConfig.ChecksumCombineMode combineMode, RpcClient mockRpcClient, OmKeyInfo keyInfo) + throws IOException { + return type == ReplicationType.RATIS ? new ReplicatedFileChecksumHelper( + mockVolume, mockBucket, "dummy", length, combineMode, mockRpcClient) + : new ECFileChecksumHelper( + mockVolume, mockBucket, "dummy", length, combineMode, mockRpcClient, keyInfo); + } - OzoneVolume mockVolume = mock(OzoneVolume.class); - when(mockVolume.getName()).thenReturn("vol1"); - OzoneBucket bucket = mock(OzoneBucket.class); - when(bucket.getName()).thenReturn("bucket1"); + private Pipeline pipeline(ReplicationType type, List datanodeDetails) { + ReplicationConfig config = type == ReplicationType.RATIS ? RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE) + : new ECReplicationConfig(6, 3); + + return Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setReplicationConfig(config) + .setState(Pipeline.PipelineState.CLOSED) + .setNodes(datanodeDetails) + .build(); + } + @ParameterizedTest + @EnumSource(names = {"EC", "RATIS"}) + public void testEmptyBlock(ReplicationType helperType) throws IOException { + // test the file checksum of a file with an empty block. 
+ RpcClient mockRpcClient = mock(RpcClient.class); + OmKeyInfo omKeyInfo = omKeyInfo(helperType, noCachedChecksum, new ArrayList<>()); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); OzoneClientConfig.ChecksumCombineMode combineMode = OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC; - ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, mockRpcClient); + OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); + when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); + when(om.lookupKey(any())).thenReturn(omKeyInfo); + when(mockVolume.getName()).thenReturn("vol1"); + when(mockBucket.getName()).thenReturn("bucket1"); + + + BaseFileChecksumHelper helper = + checksumHelper(helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); FileChecksum fileChecksum = helper.getFileChecksum(); assertInstanceOf(MD5MD5CRC32GzipFileChecksum.class, fileChecksum); assertEquals(DataChecksum.Type.CRC32, - ((MD5MD5CRC32GzipFileChecksum)fileChecksum).getCrcType()); + ((MD5MD5CRC32GzipFileChecksum) fileChecksum).getCrcType()); // test negative length - helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", -1, combineMode, mockRpcClient); + helper = + checksumHelper(helperType, mockVolume, mockBucket, -1, combineMode, mockRpcClient, omKeyInfo); helper.compute(); assertNull(helper.getKeyLocationInfoList()); } - @Test - public void testOneBlock() throws IOException { + @ParameterizedTest + @EnumSource(names = {"EC", "RATIS"}) + public void testOneBlock(ReplicationType helperType) throws IOException { // test the file checksum of a file with one block. OzoneConfiguration conf = new OzoneConfiguration(); - RpcClient mockRpcClient = mock(RpcClient.class); - - List dns = Arrays.asList( + List dns = Collections.singletonList( DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build()); - Pipeline pipeline; - pipeline = Pipeline.newBuilder() - .setId(PipelineID.randomId()) - .setReplicationConfig( - RatisReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.THREE)) - .setState(Pipeline.PipelineState.CLOSED) - .setNodes(dns) - .build(); - + Pipeline pipeline = pipeline(helperType, dns); + BlockID blockID = new BlockID(1, 1); + OmKeyLocationInfo omKeyLocationInfo = + new OmKeyLocationInfo.Builder() + .setPipeline(pipeline) + .setBlockID(blockID) + .build(); + List omKeyLocationInfoList = + Collections.singletonList(omKeyLocationInfo); + OmKeyInfo omKeyInfo = omKeyInfo(helperType, noCachedChecksum, omKeyLocationInfoList); XceiverClientGrpc xceiverClientGrpc = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( ContainerProtos.ContainerCommandRequestProto request, DatanodeDetails dn) { - return buildValidResponse(); + return buildValidResponse(helperType); } }; XceiverClientFactory factory = mock(XceiverClientFactory.class); + OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); when(factory.acquireClientForReadData(any())). 
thenReturn(xceiverClientGrpc); - when(mockRpcClient.getXceiverClientManager()).thenReturn(factory); - - OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); - - BlockID blockID = new BlockID(1, 1); - OmKeyLocationInfo omKeyLocationInfo = - new OmKeyLocationInfo.Builder().setPipeline(pipeline) - .setBlockID(blockID) - .build(); - - List omKeyLocationInfoList = - Arrays.asList(omKeyLocationInfo); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(null) - .setBucketName(null) - .setKeyName(null) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, omKeyLocationInfoList))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(0) - .setReplicationConfig(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) - .setFileEncryptionInfo(null) - .setAcls(null) - .build(); - when(om.lookupKey(any())).thenReturn(omKeyInfo); OzoneVolume mockVolume = mock(OzoneVolume.class); when(mockVolume.getName()).thenReturn("vol1"); - OzoneBucket bucket = mock(OzoneBucket.class); - when(bucket.getName()).thenReturn("bucket1"); + OzoneBucket mockBucket = mock(OzoneBucket.class); + when(mockBucket.getName()).thenReturn("bucket1"); OzoneClientConfig.ChecksumCombineMode combineMode = OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC; - ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, mockRpcClient); + BaseFileChecksumHelper helper = checksumHelper( + helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); FileChecksum fileChecksum = helper.getFileChecksum(); @@ -249,28 +251,12 @@ public XceiverClientReply sendCommandAsync( FileChecksum cachedChecksum = new MD5MD5CRC32GzipFileChecksum(); /// test cached checksum - OmKeyInfo omKeyInfoWithChecksum = new OmKeyInfo.Builder() - .setVolumeName(null) - .setBucketName(null) - .setKeyName(null) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, omKeyLocationInfoList))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(0) - .setReplicationConfig( - RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) - .setFileEncryptionInfo(null) - .setAcls(null) - .setFileChecksum(cachedChecksum) - .build(); + OmKeyInfo omKeyInfoWithChecksum = omKeyInfo(helperType, cachedChecksum, omKeyLocationInfoList); when(om.lookupKey(any())). thenReturn(omKeyInfoWithChecksum); - helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, - mockRpcClient); + helper = checksumHelper( + helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); fileChecksum = helper.getFileChecksum(); @@ -278,7 +264,7 @@ public XceiverClientReply sendCommandAsync( assertEquals(1, helper.getKeyLocationInfoList().size()); } - private XceiverClientReply buildValidResponse() { + private XceiverClientReply buildValidResponse(ReplicationType type) { // return a GetBlockResponse message of a block and its chunk checksums. 
ContainerProtos.DatanodeBlockID blockID = ContainerProtos.DatanodeBlockID.newBuilder() @@ -286,7 +272,7 @@ private XceiverClientReply buildValidResponse() { .setLocalID(1) .setBlockCommitSequenceId(1).build(); - byte[] byteArray = new byte[10]; + byte[] byteArray = new byte[12]; ByteString byteString = ByteString.copyFrom(byteArray); ContainerProtos.ChecksumData checksumData = @@ -296,13 +282,17 @@ private XceiverClientReply buildValidResponse() { .addChecksums(byteString) .build(); - ContainerProtos.ChunkInfo chunkInfo = - ContainerProtos.ChunkInfo.newBuilder() + ContainerProtos.ChunkInfo.Builder chunkInfoBuilder = ContainerProtos.ChunkInfo.newBuilder() .setChunkName("dummy_chunk") .setOffset(1) .setLen(10) - .setChecksumData(checksumData) - .build(); + .setChecksumData(checksumData); + + if (type == ReplicationType.EC) { + chunkInfoBuilder.setStripeChecksum(byteString); + } + + ContainerProtos.ChunkInfo chunkInfo = chunkInfoBuilder.build(); ContainerProtos.BlockData blockData = ContainerProtos.BlockData.newBuilder() @@ -337,6 +327,7 @@ private OzoneBucket getOzoneBucket() throws IOException { /** * Write a real key and compute file checksum of it. + * * @throws IOException */ @Test diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java index 718e724e585..10f90544de0 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java @@ -126,7 +126,7 @@ private DatanodeDetails aNode(String ip, String hostName, int port) { .setIpAddress(ip) .setHostName(hostName) .addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port)) + DatanodeDetails.newStandalonePort(port)) .build(); } } diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index bd16a0a5dfe..f7f60dcd1d1 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-common - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Common Apache Ozone Common jar diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/OzoneTrashPolicy.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/OzoneTrashPolicy.java new file mode 100644 index 00000000000..a250832215b --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/OzoneTrashPolicy.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.ozone; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InvalidPathException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.TrashPolicyDefault; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OFSPath; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; + + +/** + * TrashPolicy for Ozone Specific Trash Operations. + */ +public class OzoneTrashPolicy extends TrashPolicyDefault { + + private static final Logger LOG = + LoggerFactory.getLogger(OzoneTrashPolicy.class); + + protected static final Path CURRENT = new Path("Current"); + + protected static final int MSECS_PER_MINUTE = 60 * 1000; + + private static final FsPermission PERMISSION = + new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE); + private OzoneConfiguration ozoneConfiguration; + + public OzoneConfiguration getOzoneConfiguration() { + return ozoneConfiguration; + } + + @Override + public void initialize(Configuration conf, FileSystem fs) { + this.fs = fs; + ozoneConfiguration = OzoneConfiguration.of(conf); + float hadoopTrashInterval = conf.getFloat( + FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); + // check whether user has configured ozone specific trash-interval + // if not fall back to hadoop configuration + this.deletionInterval = (long)(conf.getFloat( + OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, hadoopTrashInterval) + * MSECS_PER_MINUTE); + } + + @Override + public boolean moveToTrash(Path path) throws IOException { + if (validatePath(path)) { + if (!isEnabled()) { + return false; + } + + if (!path.isAbsolute()) { // make path absolute + path = new Path(fs.getWorkingDirectory(), path); + } + + // check that path exists + fs.getFileStatus(path); + String qpath = fs.makeQualified(path).toString(); + + Path trashRoot = fs.getTrashRoot(path); + Path trashCurrent = new Path(trashRoot, CURRENT); + if (qpath.startsWith(trashRoot.toString())) { + return false; // already in trash + } + + if (trashRoot.getParent().toString().startsWith(qpath)) { + throw new IOException("Cannot move \"" + path + + "\" to the trash, as it contains the trash"); + } + + Path trashPath; + Path baseTrashPath; + if (fs.getUri().getScheme().equals(OzoneConsts.OZONE_OFS_URI_SCHEME)) { + OFSPath ofsPath = new OFSPath(path, ozoneConfiguration); + // trimming volume and bucket in order to be compatible with o3fs + // Also including volume and bucket name in the path is redundant as + // the key is already in a particular volume and bucket. 
+ Path trimmedVolumeAndBucket = + new Path(OzoneConsts.OZONE_URI_DELIMITER + + ofsPath.getKeyName()); + trashPath = makeTrashRelativePath(trashCurrent, trimmedVolumeAndBucket); + baseTrashPath = makeTrashRelativePath(trashCurrent, + trimmedVolumeAndBucket.getParent()); + } else { + trashPath = makeTrashRelativePath(trashCurrent, path); + baseTrashPath = makeTrashRelativePath(trashCurrent, path.getParent()); + } + + IOException cause = null; + + // try twice, in case checkpoint between the mkdirs() & rename() + for (int i = 0; i < 2; i++) { + try { + if (!fs.mkdirs(baseTrashPath, PERMISSION)) { // create current + LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath); + return false; + } + } catch (FileAlreadyExistsException e) { + // find the path which is not a directory, and modify baseTrashPath + // & trashPath, then mkdirs + Path existsFilePath = baseTrashPath; + while (!fs.exists(existsFilePath)) { + existsFilePath = existsFilePath.getParent(); + } + baseTrashPath = new Path(baseTrashPath.toString() + .replace(existsFilePath.toString(), + existsFilePath.toString() + Time.now())); + trashPath = new Path(baseTrashPath, trashPath.getName()); + // retry, ignore current failure + --i; + continue; + } catch (IOException e) { + LOG.warn("Can't create trash directory: " + baseTrashPath, e); + cause = e; + break; + } + try { + // if the target path in Trash already exists, then append with + // a current time in millisecs. + String orig = trashPath.toString(); + + while (fs.exists(trashPath)) { + trashPath = new Path(orig + Time.now()); + } + + // move to current trash + boolean renamed = fs.rename(path, trashPath); + if (!renamed) { + LOG.error("Failed to move to trash: {}", path); + throw new IOException("Failed to move to trash: " + path); + } + LOG.info("Moved: '" + path + "' to trash at: " + trashPath); + return true; + } catch (IOException e) { + cause = e; + } + } + throw (IOException) new IOException("Failed to move to trash: " + path) + .initCause(cause); + } + return false; + } + + private boolean validatePath(Path path) throws IOException { + String key = path.toUri().getPath(); + // Check to see if bucket is path item to be deleted. 
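A minimal sketch of wiring in the new OzoneTrashPolicy through the standard Hadoop trash plumbing; the fs.trash.classname knob and the assumption that fs.defaultFS points at an ofs:// URI are not part of this patch, so treat the snippet as illustrative only.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrashPolicy;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;

/** Illustrative only: selects OzoneTrashPolicy and moves one key to trash. */
public final class TrashExample {
  public static void main(String[] args) throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Standard Hadoop knob for choosing the trash policy implementation class.
    conf.set("fs.trash.classname", "org.apache.hadoop.fs.ozone.OzoneTrashPolicy");
    conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, 60);  // minutes, overrides fs.trash.interval

    FileSystem fs = FileSystem.get(conf);            // assumes fs.defaultFS is an ofs:// URI
    TrashPolicy policy = TrashPolicy.getInstance(conf, fs);
    policy.moveToTrash(new Path("/vol1/bucket1/key1"));
  }
}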
+ // Cannot moveToTrash if bucket is deleted, + // return error for this condition + OFSPath ofsPath = new OFSPath(key.substring(1), ozoneConfiguration); + if (path.isRoot() || ofsPath.isBucket()) { + throw new IOException("Recursive rm of bucket " + + path + " not permitted"); + } + + Path trashRoot = this.fs.getTrashRoot(path); + + LOG.debug("Key path to moveToTrash: {}", key); + String trashRootKey = trashRoot.toUri().getPath(); + LOG.debug("TrashrootKey for moveToTrash: {}", trashRootKey); + + if (!OzoneFSUtils.isValidName(key)) { + throw new InvalidPathException("Invalid path Name " + key); + } + // first condition tests when length key is <= length trash + // and second when length key > length trash + if ((key.contains(this.fs.TRASH_PREFIX)) && (trashRootKey.startsWith(key)) + || key.startsWith(trashRootKey)) { + return false; + } + return true; + } + + private Path makeTrashRelativePath(Path basePath, Path rmFilePath) { + return Path.mergePaths(basePath, rmFilePath); + } + +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java new file mode 100644 index 00000000000..17803f7af06 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java @@ -0,0 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.fs.ozone; +/** + * Ozone trash policy implementation. + */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java index 61ae0879f78..b58e1021d98 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java @@ -214,7 +214,7 @@ public String toString() { } /** - * Get the volume & bucket or mount name (non-key path). + * Get the volume and bucket or mount name (non-key path). * @return String of path excluding key in bucket. */ // Prepend a delimiter at beginning. e.g. /vol1/buc1 @@ -301,6 +301,19 @@ public boolean isSnapshotPath() { return false; } + /** + * If the path is a snapshot path get the snapshot name from the key name. + */ + public String getSnapshotName() { + if (keyName.startsWith(OM_SNAPSHOT_INDICATOR)) { + if (!bucketName.isEmpty() && !volumeName.isEmpty()) { + String[] keyNames = keyName.split(OZONE_URI_DELIMITER); + return keyNames.length > 1 ? keyNames[1] : null; + } + } + return null; + } + /** * If key name is not empty, the given path is a key. * e.g. /volume1/bucket2/key3 is a key. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 11f176362a6..8d24f2de155 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -119,7 +119,7 @@ public static InetSocketAddress getOmAddress(ConfigurationSource conf) { * Return list of OM addresses by service ids - when HA is enabled. * * @param conf {@link ConfigurationSource} - * @return {service.id -> [{@link InetSocketAddress}]} + * @return {service.id -> [{@link InetSocketAddress}]} */ public static Map> getOmHAAddressesById( ConfigurationSource conf) { @@ -243,6 +243,10 @@ public static boolean isReadOnly( case ListKeys: case ListKeysLight: case ListTrash: + // ListTrash is deprecated by HDDS-11251. Keeping this in here + // As protobuf currently doesn't support deprecating enum fields + // TODO: Remove once migrated to proto3 and mark fields in proto + // as deprecated case ServiceList: case ListOpenFiles: case ListMultiPartUploadParts: @@ -274,7 +278,9 @@ public static boolean isReadOnly( case SetSafeMode: case PrintCompactionLogDag: case GetSnapshotInfo: - case GetServerDefaults: + case GetObjectTagging: + case GetQuotaRepairStatus: + case StartQuotaRepair: return true; case CreateVolume: case SetVolumeProperty: @@ -304,6 +310,10 @@ public static boolean isReadOnly( case AddAcl: case PurgeKeys: case RecoverTrash: + // RecoverTrash is deprecated by HDDS-11251. 
Keeping this in here + // As protobuf currently doesn't support deprecating enum fields + // TODO: Remove once migrated to proto3 and mark fields in proto + // as deprecated case FinalizeUpgrade: case Prepare: case CancelPrepare: @@ -323,12 +333,15 @@ public static boolean isReadOnly( case DeleteSnapshot: case RenameSnapshot: case SnapshotMoveDeletedKeys: + case SnapshotMoveTableKeys: case SnapshotPurge: case RecoverLease: case SetTimes: case AbortExpiredMultiPartUploads: case SetSnapshotProperty: case QuotaRepair: + case PutObjectTagging: + case DeleteObjectTagging: case UnknownCommand: return false; case EchoRPC: @@ -697,7 +710,7 @@ public static void verifyKeyNameWithSnapshotReservedWordForDeletion(String keyNa * Look at 'ozone.om.internal.service.id' first. If configured, return that. * If the above is not configured, look at 'ozone.om.service.ids'. * If count(ozone.om.service.ids) == 1, return that id. - * If count(ozone.om.service.ids) > 1 throw exception + * If count(ozone.om.service.ids) > 1 throw exception * If 'ozone.om.service.ids' is not configured, return null. (Non HA) * @param conf configuration * @return OM service ID. @@ -757,7 +770,7 @@ public static String normalizeKey(String keyName, normalizedKeyName = new Path(OM_KEY_PREFIX + keyName) .toUri().getPath(); } - if (!keyName.equals(normalizedKeyName) && LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled() && !keyName.equals(normalizedKeyName)) { LOG.debug("Normalized key {} to {} ", keyName, normalizedKeyName.substring(1)); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 26693d19c64..e2b2f61a368 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.ratis.util.MemoizedSupplier; @@ -41,8 +42,11 @@ import java.util.function.IntFunction; import java.util.function.Supplier; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; /** * OzoneACL classes define bucket ACLs used in OZONE. @@ -58,6 +62,13 @@ public class OzoneAcl { private static final String ACL_SCOPE_REGEX = ".*\\[(ACCESS|DEFAULT)\\]"; + /** + * Link bucket default acl defined [world::rw] + * which is similar to Linux POSIX symbolic. 
+ */ + public static final OzoneAcl LINK_BUCKET_DEFAULT_ACL = + new OzoneAcl(IAccessAuthorizer.ACLIdentityType.WORLD, "", ACCESS, READ, WRITE); + private final ACLIdentityType type; private final String name; @JsonIgnore diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java index 8ffa3c45c09..c7e20fb7e8b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java @@ -27,7 +27,7 @@ /** * An {@link OutputStream} first write data to a buffer up to the capacity. - * Then, select {@link Underlying} by the number of bytes written. + * Then, select {@code Underlying} by the number of bytes written. * When {@link #flush()}, {@link #hflush()}, {@link #hsync()} * or {@link #close()} is invoked, * it will force flushing the buffer and {@link OutputStream} selection. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java index 7d4e769365f..99e2759117e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; /** * Protocol for OmMetadataReader's. @@ -165,4 +166,11 @@ ListKeysLightResult listKeysLight(String volumeName, String bucketName, * @throws IOException if there is error. */ List getAcl(OzoneObj obj) throws IOException; + + /** + * Gets the tags for the specified key. + * @param args Key args + * @return Tags associated with the key. + */ + Map getObjectTagging(OmKeyArgs args) throws IOException; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 0f3b55235be..880fe8614b2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -73,6 +73,9 @@ private OMConfigKeys() { public static final String OZONE_OM_DECOMMISSIONED_NODES_KEY = "ozone.om.decommissioned.nodes"; + public static final String OZONE_OM_FEATURES_DISABLED = + "ozone.om.features.disabled"; + public static final String OZONE_OM_ADDRESS_KEY = "ozone.om.address"; public static final String OZONE_OM_BIND_HOST_DEFAULT = @@ -400,6 +403,8 @@ private OMConfigKeys() { /** * Configuration properties for Snapshot Directory Service. 
*/ + public static final String OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED = "ozone.snapshot.deep.cleaning.enabled"; + public static final boolean OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT = false; public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL = "ozone.snapshot.directory.service.interval"; public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT @@ -416,6 +421,11 @@ private OMConfigKeys() { // resulting 24MB public static final int OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT = 6000; + public static final String OZONE_THREAD_NUMBER_DIR_DELETION = + "ozone.thread.number.dir.deletion"; + + public static final int OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT = 10; + public static final String SNAPSHOT_SST_DELETING_LIMIT_PER_TASK = "ozone.snapshot.filtering.limit.per.task"; public static final int SNAPSHOT_SST_DELETING_LIMIT_PER_TASK_DEFAULT = 2; @@ -620,4 +630,9 @@ private OMConfigKeys() { public static final String OZONE_OM_MAX_BUCKET = "ozone.om.max.buckets"; public static final int OZONE_OM_MAX_BUCKET_DEFAULT = 100000; + /** + * Configuration property to configure the max server side response size for list calls. + */ + public static final String OZONE_OM_SERVER_LIST_MAX_SIZE = "ozone.om.server.list.max.size"; + public static final int OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT = 1000; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java index ae238f1b45a..db00917dacc 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java @@ -80,7 +80,7 @@ T doUnderLock(String lockId, S3SecretFunction action) /** * Default implementation of secret check method. * @param kerberosId kerberos principal. - * @return true if exist associated s3 secret for given {@param kerberosId}, + * @return true if exist associated s3 secret for given {@code kerberosId}, * false if not. */ default boolean hasS3Secret(String kerberosId) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java index 0bfd6922fee..8c3943d0fab 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java @@ -24,7 +24,7 @@ * Exception thrown by * {@link org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB} when * OM leader is not ready to serve requests. This error is thrown when Raft - * Server returns {@link org.apache.ratis.protocol.LeaderNotReadyException}. + * Server returns {@link org.apache.ratis.protocol.exceptions.LeaderNotReadyException}. 
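// A short sketch of the getObjectTagging() read path declared above for IOmMetadataReader;
// "metadataReader" stands for any IOmMetadataReader implementation available in the calling
// context, and the OmKeyArgs builder setters used here are the pre-existing ones.
OmKeyArgs tagArgs = new OmKeyArgs.Builder()
    .setVolumeName("vol1")
    .setBucketName("bucket1")
    .setKeyName("key1")
    .build();
Map<String, String> tags = metadataReader.getObjectTagging(tagArgs);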
*/ public class OMLeaderNotReadyException extends IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java index 65d9e559005..744ada797e7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java @@ -18,12 +18,9 @@ package org.apache.hadoop.ozone.om.ha; import io.grpc.Status; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; -import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -41,6 +38,7 @@ import java.util.Optional; import java.util.OptionalInt; import io.grpc.StatusRuntimeException; +import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,9 +58,10 @@ public class GrpcOMFailoverProxyProvider extends LoggerFactory.getLogger(GrpcOMFailoverProxyProvider.class); public GrpcOMFailoverProxyProvider(ConfigurationSource configuration, + UserGroupInformation ugi, String omServiceId, Class protocol) throws IOException { - super(configuration, omServiceId, protocol); + super(configuration, ugi, omServiceId, protocol); } @Override @@ -116,9 +115,7 @@ protected void loadOMClientConfigs(ConfigurationSource config, String omSvcId) private T createOMProxy() throws IOException { InetSocketAddress addr = new InetSocketAddress(0); - Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); - return (T) RPC.getProxy(getInterface(), 0, addr, hadoopConf); + return createOMProxy(addr); } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java index 543d2e4aed3..4447a72ab13 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java @@ -29,15 +29,9 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.ha.ConfUtils; @@ -59,9 +53,7 @@ public class HadoopRpcOMFailoverProxyProvider extends public static final Logger LOG = LoggerFactory.getLogger(HadoopRpcOMFailoverProxyProvider.class); - private final long omVersion; private final Text delegationTokenService; - private final UserGroupInformation ugi; private Map omProxyInfos; private List retryExceptions = new ArrayList<>(); @@ -75,9 +67,7 @@ public 
HadoopRpcOMFailoverProxyProvider(ConfigurationSource configuration, UserGroupInformation ugi, String omServiceId, Class protocol) throws IOException { - super(configuration, omServiceId, protocol); - this.ugi = ugi; - this.omVersion = RPC.getProtocolVersion(protocol); + super(configuration, ugi, omServiceId, protocol); this.delegationTokenService = computeDelegationTokenService(); } @@ -130,24 +120,6 @@ protected void loadOMClientConfigs(ConfigurationSource config, String omSvcId) setOmNodeAddressMap(omNodeAddressMap); } - private T createOMProxy(InetSocketAddress omAddress) throws IOException { - Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); - RPC.setProtocolEngine(hadoopConf, getInterface(), ProtobufRpcEngine.class); - - // FailoverOnNetworkException ensures that the IPC layer does not attempt - // retries on the same OM in case of connection exception. This retry - // policy essentially results in TRY_ONCE_THEN_FAIL. - RetryPolicy connectionRetryPolicy = RetryPolicies - .failoverOnNetworkException(0); - - return (T) RPC.getProtocolProxy(getInterface(), omVersion, - omAddress, ugi, hadoopConf, NetUtils.getDefaultSocketFactory( - hadoopConf), (int) OmUtils.getOMClientRpcTimeOut(getConf()), - connectionRetryPolicy).getProxy(); - - } - /** * Get the proxy object which should be used until the next failover event * occurs. RPC proxy object is intialized lazily. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java index 1a738b2ac84..5045a32bdcd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java @@ -21,17 +21,25 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.protobuf.ServiceException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.retry.FailoverProxyProvider; +import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.slf4j.Logger; @@ -85,13 +93,17 @@ public abstract class OMFailoverProxyProviderBase implements private Set accessControlExceptionOMs = new HashSet<>(); private boolean performFailoverDone; + private final UserGroupInformation ugi; + public OMFailoverProxyProviderBase(ConfigurationSource configuration, + UserGroupInformation ugi, String omServiceId, Class 
protocol) throws IOException { this.conf = configuration; this.protocolClass = protocol; this.performFailoverDone = true; this.omServiceId = omServiceId; + this.ugi = ugi; waitBetweenRetries = conf.getLong( OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_KEY, @@ -112,6 +124,35 @@ protected abstract void loadOMClientConfigs(ConfigurationSource config, String omSvcId) throws IOException; + /** + * Get the protocol proxy for provided address. + * @param omAddress An instance of {@link InetSocketAddress} which contains the address to connect + * @return the proxy connection to the address and the set of methods supported by the server at the address + * @throws IOException if any error occurs while trying to get the proxy + */ + protected T createOMProxy(InetSocketAddress omAddress) throws IOException { + Configuration hadoopConf = + LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); + + // TODO: Post upgrade to Protobuf 3.x we need to use ProtobufRpcEngine2 + RPC.setProtocolEngine(hadoopConf, getInterface(), ProtobufRpcEngine.class); + + // Ensure we do not attempt retry on the same OM in case of exceptions + RetryPolicy connectionRetryPolicy = RetryPolicies.failoverOnNetworkException(0); + + return (T) RPC.getProtocolProxy( + getInterface(), + RPC.getProtocolVersion(protocolClass), + omAddress, + ugi, + hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf), + (int) OmUtils.getOMClientRpcTimeOut(getConf()), + connectionRetryPolicy + ).getProxy(); + } + + protected synchronized boolean shouldFailover(Exception ex) { Throwable unwrappedException = HddsUtils.getUnwrappedException(ex); if (unwrappedException instanceof AccessControlException || diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java index a9fa742a108..82b9d8cccfb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java @@ -110,6 +110,10 @@ public String getOwnerName() { return ownerName; } + public long getReplicatedSize() { + return QuotaUtil.getReplicatedSize(getDataSize(), replicationConfig); + } + /** * Builder of BasicOmKeyInfo. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DeleteTenantState.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DeleteTenantState.java index 5fe53ee1ea3..1ffae273f0f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DeleteTenantState.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DeleteTenantState.java @@ -66,7 +66,6 @@ public static DeleteTenantState.Builder newBuilder() { /** * Builder for TenantDeleted. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String volumeName; private long volRefCount; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java index db07a19b211..0b9b4b38a51 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java @@ -34,8 +34,8 @@ private KeyValueUtil() { /** * Parse Key,Value map data from protobuf representation. 
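// A sketch of how the UGI now reaches the base provider: callers pass UserGroupInformation at
// construction (mirroring the GrpcOmTransport change later in this patch) and the base class
// builds the RPC proxy through createOMProxy(InetSocketAddress). "conf" and "omServiceId" are
// assumed from the surrounding context, and the generic type parameter on the provider is
// assumed to be the protocol interface.
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
GrpcOMFailoverProxyProvider<OzoneManagerProtocolPB> provider =
    new GrpcOMFailoverProxyProvider<>(conf, ugi, omServiceId, OzoneManagerProtocolPB.class);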
*/ - public static Map getFromProtobuf(List metadata) { - return metadata.stream() + public static Map getFromProtobuf(List keyValueList) { + return keyValueList.stream() .collect(Collectors.toMap(KeyValue::getKey, KeyValue::getValue)); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index 5a83f6dbba6..42c97211c97 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -48,7 +48,8 @@ public final class OmBucketInfo extends WithObjectID implements Auditable, CopyO private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(BucketInfo.getDefaultInstance()), OmBucketInfo::getFromProtobuf, - OmBucketInfo::getProtobuf); + OmBucketInfo::getProtobuf, + OmBucketInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java index 8ca0054b347..258aa1ace98 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java @@ -34,6 +34,7 @@ public final class OmDBAccessIdInfo { Proto2Codec.get(ExtendedUserAccessIdInfo.getDefaultInstance()), OmDBAccessIdInfo::getFromProtobuf, OmDBAccessIdInfo::getProtobuf, + OmDBAccessIdInfo.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { @@ -111,7 +112,6 @@ public boolean getIsDelegatedAdmin() { /** * Builder for OmDBAccessIdInfo. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String tenantId; private String userPrincipal; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java index bb356eafdd9..9aaf04f640b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java @@ -34,6 +34,7 @@ public final class OmDBTenantState implements Comparable { Proto2Codec.get(TenantState.getDefaultInstance()), OmDBTenantState::getFromProtobuf, OmDBTenantState::getProtobuf, + OmDBTenantState.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { @@ -167,7 +168,6 @@ public static OmDBTenantState getFromProtobuf(TenantState proto) { /** * Builder for OmDBTenantState. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String tenantId; private String bucketNamespaceName; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java index 75b01a04171..a511e2cb047 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java @@ -33,11 +33,11 @@ * principal. 
*/ public final class OmDBUserPrincipalInfo { - private static final Codec CODEC - = new DelegatedCodec<>( - Proto2Codec.get(TenantUserPrincipalInfo.getDefaultInstance()), - OmDBUserPrincipalInfo::getFromProtobuf, - OmDBUserPrincipalInfo::getProtobuf); + private static final Codec CODEC = new DelegatedCodec<>( + Proto2Codec.get(TenantUserPrincipalInfo.getDefaultInstance()), + OmDBUserPrincipalInfo::getFromProtobuf, + OmDBUserPrincipalInfo::getProtobuf, + OmDBUserPrincipalInfo.class); public static Codec getCodec() { return CODEC; @@ -90,7 +90,6 @@ public static OmDBUserPrincipalInfo getFromProtobuf( /** * Builder for OmDBUserPrincipalInfo. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private Set accessIds; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index 55e138dbd10..69ed1b613bd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -40,7 +40,8 @@ public class OmDirectoryInfo extends WithParentObjectId private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(DirectoryInfo.getDefaultInstance()), OmDirectoryInfo::getFromProtobuf, - OmDirectoryInfo::getProtobuf); + OmDirectoryInfo::getProtobuf, + OmDirectoryInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index f52a142239b..5c480860d2b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -65,7 +65,8 @@ private static Codec newCodec(boolean ignorePipeline) { return new DelegatedCodec<>( Proto2Codec.get(KeyInfo.getDefaultInstance()), OmKeyInfo::getFromProtobuf, - k -> k.getProtobuf(ignorePipeline, ClientVersion.CURRENT_VERSION)); + k -> k.getProtobuf(ignorePipeline, ClientVersion.CURRENT_VERSION), + OmKeyInfo.class); } public static Codec getCodec(boolean ignorePipeline) { @@ -360,7 +361,6 @@ public synchronized void appendNewBlocks( * @param updateTime if true, updates modification time. * @param keepOldVersions if false, old blocks won't be kept * and the new block versions will always be 0 - * @throws IOException */ public synchronized long addNewVersion( List newLocationList, boolean updateTime, @@ -628,7 +628,7 @@ public OmKeyInfo build() { /** * For network transmit. - * @return + * @return KeyInfo */ public KeyInfo getProtobuf(int clientVersion) { return getProtobuf(false, clientVersion); @@ -660,7 +660,7 @@ public KeyInfo getNetworkProtobuf(String fullKeyName, int clientVersion, /** * * @param ignorePipeline true for persist to DB, false for network transmit. 
- * @return + * @return KeyInfo */ public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) { return getProtobuf(ignorePipeline, null, clientVersion, false); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java index 61402ee28e6..7c1e01d2ae5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java @@ -42,7 +42,8 @@ public final class OmMultipartKeyInfo extends WithObjectID implements CopyObject private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(MultipartKeyInfo.getDefaultInstance()), OmMultipartKeyInfo::getFromProto, - OmMultipartKeyInfo::getProto); + OmMultipartKeyInfo::getProto, + OmMultipartKeyInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java index fff6f38a37f..a09d5ef0902 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java @@ -45,7 +45,6 @@ public static OmRangerSyncArgs.Builder newBuilder() { /** * Builder for OmRangerSyncArgs. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private long newServiceVersion; /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java index bf331c48a14..bd1997641a7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java @@ -77,7 +77,6 @@ public static OmTenantArgs.Builder newBuilder() { /** * Builder for OmTenantArgs. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private String tenantId; private String volumeName; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index 499b4878362..65182a860d9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -46,7 +46,8 @@ public final class OmVolumeArgs extends WithObjectID private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(VolumeInfo.getDefaultInstance()), OmVolumeArgs::getFromProtobuf, - OmVolumeArgs::getProtobuf); + OmVolumeArgs::getProtobuf, + OmVolumeArgs.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java index 517f0c14ce0..083b1329db6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -18,21 +18,25 @@ package org.apache.hadoop.ozone.om.helpers; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.RequestContext; +import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; @@ -43,28 +47,51 @@ * Helper class for ozone acls operations. */ public final class OzoneAclUtil { + static final Logger LOG = LoggerFactory.getLogger(OzoneAclUtil.class); private OzoneAclUtil() { } + private static ACLType[] userRights; + private static ACLType[] groupRights; + /** - * Helper function to get access acl list for current user. + * Helper function to get default access acl list for current user. * - * @param userName - * @param userGroups + * @param ugi current login user + * @param conf current configuration * @return list of OzoneAcls * */ - public static List getAclList(String userName, - String[] userGroups, ACLType userRights, ACLType groupRights) { - + public static List getDefaultAclList(UserGroupInformation ugi, OzoneConfiguration conf) { + // Get default acl rights for user and group. + if (userRights == null || groupRights == null) { + OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); + userRights = aclConfig.getUserDefaultRights(); + groupRights = aclConfig.getGroupDefaultRights(); + } List listOfAcls = new ArrayList<>(); + // User ACL. 
+ listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userRights)); + try { + String groupName = ugi.getPrimaryGroupName(); + listOfAcls.add(new OzoneAcl(GROUP, groupName, ACCESS, groupRights)); + } catch (IOException e) { + // do nothing, since user has the permission, user can add ACL for selected groups later. + LOG.warn("Failed to get primary group from user {}", ugi); + } + return listOfAcls; + } + public static List getAclList(UserGroupInformation ugi, ACLType userPrivilege, ACLType groupPrivilege) { + List listOfAcls = new ArrayList<>(); // User ACL. - listOfAcls.add(new OzoneAcl(USER, userName, ACCESS, userRights)); - if (userGroups != null) { - // Group ACLs of the User. - Arrays.asList(userGroups).forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, ACCESS, groupRights))); + listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userPrivilege)); + try { + String groupName = ugi.getPrimaryGroupName(); + listOfAcls.add(new OzoneAcl(GROUP, groupName, ACCESS, groupPrivilege)); + } catch (IOException e) { + // do nothing, since user has the permission, user can add ACL for selected groups later. + LOG.warn("Failed to get primary group from user {}", ugi); } return listOfAcls; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index 74effbd80a3..bf4ffa9d8de 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -18,10 +18,15 @@ package org.apache.hadoop.ozone.om.helpers; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.nio.file.Paths; import java.util.UUID; @@ -32,6 +37,7 @@ * Utility class for OzoneFileSystem. */ public final class OzoneFSUtils { + static final Logger LOG = LoggerFactory.getLogger(OzoneFSUtils.class); private OzoneFSUtils() { } @@ -292,4 +298,31 @@ public static Path trimPathToDepth(Path path, int maxDepth) { } return res; } + + /** + * Helper method to return whether Hsync can be enabled. + * And print warning when the config is ignored. + */ + public static boolean canEnableHsync(ConfigurationSource conf, boolean isClient) { + final String confKey = isClient ? + "ozone.client.hbase.enhancements.allowed" : + OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; + + boolean confHBaseEnhancementsAllowed = conf.getBoolean( + confKey, OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED_DEFAULT); + + boolean confHsyncEnabled = conf.getBoolean( + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT); + + if (confHBaseEnhancementsAllowed) { + return confHsyncEnabled; + } else { + if (confHsyncEnabled) { + LOG.warn("Ignoring {} = {} because HBase enhancements are disallowed. 
To enable it, set {} = true as well.", + OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true, + confKey); + } + return false; + } + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java index 6bab1025b13..ed3d3ee25c2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java @@ -21,7 +21,6 @@ import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ipc.IdentityProvider; import org.apache.hadoop.ipc.Schedulable; -import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,7 +42,7 @@ public OzoneIdentityProvider() { } /** - * If schedulable isn't instance of {@link Server.Call}, + * If schedulable isn't instance of {@link org.apache.hadoop.ipc.Server.Call}, * then trying to access getCallerContext() method, will * result in an exception. * diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index 24c172ef8fd..2d0f92a1f0c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -34,7 +34,7 @@ /** * Args for deleted keys. This is written to om metadata deletedTable. * Once a key is deleted, it is moved to om metadata deletedTable. Having a - * {label: List} ensures that if users create & delete keys with + * label: {@code List} ensures that if users create and delete keys with * exact same uri multiple times, all the delete instances are bundled under * the same key name. This is useful as part of GDPR compliance where an * admin wants to confirm if a given key is deleted from deletedTable metadata. 
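// A compact sketch of the gating logic in canEnableHsync() above: hsync is honored only when the
// HBase-enhancements flag is also enabled. The server-side keys are used here; client-side callers
// are checked against "ozone.client.hbase.enhancements.allowed" instead.
OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
boolean hsyncAllowed = OzoneFSUtils.canEnableHsync(conf, false); // false if the enhancements flag is off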
@@ -47,7 +47,8 @@ private static Codec newCodec(boolean ignorePipeline) { return new DelegatedCodec<>( Proto2Codec.get(RepeatedKeyInfo.getDefaultInstance()), RepeatedOmKeyInfo::getFromProto, - k -> k.getProto(ignorePipeline, ClientVersion.CURRENT_VERSION)); + k -> k.getProto(ignorePipeline, ClientVersion.CURRENT_VERSION), + RepeatedOmKeyInfo.class); } public static Codec getCodec(boolean ignorePipeline) { @@ -110,9 +111,7 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo } /** - * - * @param compact, true for persistence, false for network transmit - * @return + * @param compact true for persistence, false for network transmit */ public RepeatedKeyInfo getProto(boolean compact, int clientVersion) { List list = new ArrayList<>(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java index 20c145bd0c0..7ea932c5716 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java @@ -31,7 +31,8 @@ public final class S3SecretValue { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(S3Secret.getDefaultInstance()), S3SecretValue::fromProtobuf, - S3SecretValue::getProtobuf); + S3SecretValue::getProtobuf, + S3SecretValue.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3VolumeContext.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3VolumeContext.java index dbbc3544765..4763c411934 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3VolumeContext.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3VolumeContext.java @@ -69,7 +69,6 @@ public static S3VolumeContext.Builder newBuilder() { /** * Builder for S3VolumeContext. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private OmVolumeArgs omVolumeArgs; private String userPrincipal; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java index c8bdbf43c42..5dbe3487e19 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java @@ -25,6 +25,7 @@ import java.util.Map; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; +import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; @@ -59,6 +60,7 @@ public final class ServiceInfo { private Map ports; private OMRoleInfo omRoleInfo; + private OzoneFsServerDefaults serverDefaults; /** * Default constructor for JSON deserialization. @@ -76,6 +78,24 @@ private ServiceInfo(NodeType nodeType, List portList, OzoneManagerVersion omVersion, OMRoleInfo omRole) { + this(nodeType, hostname, portList, omVersion, omRole, null); + } + + /** + * Constructs the ServiceInfo for the {@code nodeType}. 
+ * @param nodeType type of node/service + * @param hostname hostname of the service + * @param portList list of ports the service listens to + * @param omVersion Om Version + * @param omRole OM role info + * @param serverDefaults server default configuration values + */ + private ServiceInfo(NodeType nodeType, + String hostname, + List portList, + OzoneManagerVersion omVersion, + OMRoleInfo omRole, + OzoneFsServerDefaults serverDefaults) { Preconditions.checkNotNull(nodeType); Preconditions.checkNotNull(hostname); this.nodeType = nodeType; @@ -86,6 +106,7 @@ private ServiceInfo(NodeType nodeType, ports.put(port.getType(), port.getValue()); } this.omRoleInfo = omRole; + this.serverDefaults = serverDefaults; } /** @@ -143,6 +164,15 @@ public OMRoleInfo getOmRoleInfo() { return omRoleInfo; } + /** + * Returns the Ozone Server default configuration. + * @return OzoneFsServerDefaults + */ + @JsonIgnore + public OzoneFsServerDefaults getServerDefaults() { + return serverDefaults; + } + /** * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo. * @@ -170,6 +200,9 @@ public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() { if (nodeType == NodeType.OM && omRoleInfo != null) { builder.setOmRole(omRoleInfo); } + if (serverDefaults != null) { + builder.setServerDefaults(serverDefaults.getProtobuf()); + } return builder.build(); } @@ -185,7 +218,9 @@ public static ServiceInfo getFromProtobuf( serviceInfo.getHostname(), serviceInfo.getServicePortsList(), OzoneManagerVersion.fromProtoValue(serviceInfo.getOMVersion()), - serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null); + serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null, + serviceInfo.hasServerDefaults() ? OzoneFsServerDefaults.getFromProtobuf( + serviceInfo.getServerDefaults()) : null); } /** @@ -206,6 +241,7 @@ public static class Builder { private List portList = new ArrayList<>(); private OMRoleInfo omRoleInfo; private OzoneManagerVersion omVersion; + private OzoneFsServerDefaults serverDefaults; /** * Gets the Om Client Protocol Version. @@ -259,6 +295,11 @@ public Builder setOmRoleInfo(OMRoleInfo omRole) { return this; } + public Builder setServerDefaults(OzoneFsServerDefaults defaults) { + serverDefaults = defaults; + return this; + } + /** * Builds and returns {@link ServiceInfo} with the set values.
* @return {@link ServiceInfo} @@ -268,7 +309,8 @@ public ServiceInfo build() { host, portList, omVersion, - omRoleInfo); + omRoleInfo, + serverDefaults); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java index 0d221dc1cd4..c3c8efc11ad 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java @@ -261,6 +261,11 @@ private static final class SnapshotDiffJobCodec .setSerializationInclusion(JsonInclude.Include.NON_NULL) .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + @Override + public Class getTypeClass() { + return SnapshotDiffJob.class; + } + @Override public byte[] toPersistedFormat(SnapshotDiffJob object) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 47a48c37e8e..cf0a60dd353 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -19,6 +19,7 @@ */ import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.CopyObject; @@ -51,14 +52,14 @@ * Each snapshot created has an associated SnapshotInfo entry * containing the snapshotId, snapshot path, * snapshot checkpoint directory, previous snapshotId - * for the snapshot path & global amongst other necessary fields. + * for the snapshot path and global amongst other necessary fields. 
*/ public final class SnapshotInfo implements Auditable, CopyObject { private static final Codec CODEC = new DelegatedCodec<>( - Proto2Codec.get( - OzoneManagerProtocolProtos.SnapshotInfo.getDefaultInstance()), + Proto2Codec.get(OzoneManagerProtocolProtos.SnapshotInfo.getDefaultInstance()), SnapshotInfo::getFromProtobuf, - SnapshotInfo::getProtobuf); + SnapshotInfo::getProtobuf, + SnapshotInfo.class); public static Codec getCodec() { return CODEC; @@ -124,6 +125,7 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) { private long exclusiveSize; private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; + private ByteString lastTransactionInfo; private SnapshotInfo(Builder b) { this.snapshotId = b.snapshotId; @@ -145,6 +147,7 @@ private SnapshotInfo(Builder b) { this.exclusiveSize = b.exclusiveSize; this.exclusiveReplicatedSize = b.exclusiveReplicatedSize; this.deepCleanedDeletedDir = b.deepCleanedDeletedDir; + this.lastTransactionInfo = b.lastTransactionInfo; } public void setName(String name) { @@ -261,13 +264,15 @@ public SnapshotInfo.Builder toBuilder() { .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) .setSnapshotPath(snapshotPath) .setCheckpointDir(checkpointDir) + .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean) .setSstFiltered(sstFiltered) .setReferencedSize(referencedSize) .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setDeepCleanedDeletedDir(deepCleanedDeletedDir); + .setDeepCleanedDeletedDir(deepCleanedDeletedDir) + .setLastTransactionInfo(lastTransactionInfo); } /** @@ -293,6 +298,7 @@ public static class Builder { private long exclusiveSize; private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; + private ByteString lastTransactionInfo; public Builder() { // default values @@ -411,6 +417,11 @@ public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { return this; } + public Builder setLastTransactionInfo(ByteString lastTransactionInfo) { + this.lastTransactionInfo = lastTransactionInfo; + return this; + } + public SnapshotInfo build() { Preconditions.checkNotNull(name); return new SnapshotInfo(this); @@ -445,6 +456,10 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { sib.setGlobalPreviousSnapshotID(toProtobuf(globalPreviousSnapshotId)); } + if (lastTransactionInfo != null) { + sib.setLastTransactionInfo(lastTransactionInfo); + } + sib.setSnapshotPath(snapshotPath) .setCheckpointDir(checkpointDir) .setDbTxSequenceNumber(dbTxSequenceNumber) @@ -513,6 +528,10 @@ public static SnapshotInfo getFromProtobuf( snapshotInfoProto.getDeepCleanedDeletedDir()); } + if (snapshotInfoProto.hasLastTransactionInfo()) { + osib.setLastTransactionInfo(snapshotInfoProto.getLastTransactionInfo()); + } + osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath()) .setCheckpointDir(snapshotInfoProto.getCheckpointDir()) .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber()); @@ -605,6 +624,14 @@ public void setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { this.deepCleanedDeletedDir = deepCleanedDeletedDir; } + public ByteString getLastTransactionInfo() { + return lastTransactionInfo; + } + + public void setLastTransactionInfo(ByteString lastTransactionInfo) { + this.lastTransactionInfo = lastTransactionInfo; + } + /** * Generate default name of snapshot, (used if user doesn't provide one). 
*/ @@ -673,7 +700,8 @@ public boolean equals(Object o) { referencedReplicatedSize == that.referencedReplicatedSize && exclusiveSize == that.exclusiveSize && exclusiveReplicatedSize == that.exclusiveReplicatedSize && - deepCleanedDeletedDir == that.deepCleanedDeletedDir; + deepCleanedDeletedDir == that.deepCleanedDeletedDir && + Objects.equals(lastTransactionInfo, that.lastTransactionInfo); } @Override @@ -684,7 +712,7 @@ public int hashCode() { globalPreviousSnapshotId, snapshotPath, checkpointDir, deepClean, sstFiltered, referencedSize, referencedReplicatedSize, - exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir); + exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir, lastTransactionInfo); } /** @@ -692,27 +720,7 @@ public int hashCode() { */ @Override public SnapshotInfo copyObject() { - return new Builder() - .setSnapshotId(snapshotId) - .setName(name) - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setSnapshotStatus(snapshotStatus) - .setCreationTime(creationTime) - .setDeletionTime(deletionTime) - .setPathPreviousSnapshotId(pathPreviousSnapshotId) - .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) - .setSnapshotPath(snapshotPath) - .setCheckpointDir(checkpointDir) - .setDbTxSequenceNumber(dbTxSequenceNumber) - .setDeepClean(deepClean) - .setSstFiltered(sstFiltered) - .setReferencedSize(referencedSize) - .setReferencedReplicatedSize(referencedReplicatedSize) - .setExclusiveSize(exclusiveSize) - .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setDeepCleanedDeletedDir(deepCleanedDeletedDir) - .build(); + return this.toBuilder().build(); } @Override @@ -737,6 +745,7 @@ public String toString() { ", exclusiveSize: '" + exclusiveSize + '\'' + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + ", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + + ", lastTransactionInfo: '" + lastTransactionInfo + '\'' + '}'; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java index 753d528cb05..a715bfbc153 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java @@ -57,7 +57,7 @@ public interface AccountNameSpace { * Get Space Usage Information for this AccountNameSpace. This can be * used for billing purpose. Such Aggregation can also be done lazily * by a Recon job. Implementations can decide. - * @return + * @return SpaceUsage */ SpaceUsageSource getSpaceUsage(); @@ -71,7 +71,7 @@ public interface AccountNameSpace { /** * Get Quota Information for this AccountNameSpace. - * @return + * @return OzoneQuota */ OzoneQuota getQuota(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java index 1481f1b466b..d5ecf7bba80 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java @@ -74,7 +74,7 @@ public interface BucketNameSpace { * Get Space Usage Information for this BucketNameSpace. This can be * used for billing purpose. Such Aggregation can also be done lazily * by a Recon job. Implementations can decide. 
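// A small sketch of the simplified copy path above: copyObject() now round-trips through
// toBuilder(), so the new lastTransactionInfo bytes travel with every other field.
// "snapshotInfo" is an existing SnapshotInfo instance assumed from context; the ByteString
// payload below is purely illustrative.
SnapshotInfo updated = snapshotInfo.toBuilder()
    .setLastTransactionInfo(ByteString.copyFromUtf8("1#100"))
    .build();
SnapshotInfo copy = updated.copyObject(); // equivalent to updated.toBuilder().build()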
- * @return + * @return SpaceUsageSource */ SpaceUsageSource getSpaceUsage(); @@ -88,7 +88,7 @@ public interface BucketNameSpace { /** * Get Quota Information for this BucketNameSpace. - * @return + * @return OzoneQuota */ OzoneQuota getQuota(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 45922c107cb..7f633d7ea73 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -56,7 +55,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -1055,39 +1053,6 @@ DBUpdates getDBUpdates( OzoneManagerProtocolProtos.DBUpdatesRequest dbUpdatesRequest) throws IOException; - /** - * List trash allows the user to list the keys that were marked as deleted, - * but not actually deleted by Ozone Manager. This allows a user to recover - * keys within a configurable window. - * @param volumeName - The volume name, which can also be a wild card - * using '*'. - * @param bucketName - The bucket name, which can also be a wild card - * using '*'. - * @param startKeyName - List keys from a specific key name. - * @param keyPrefix - List keys using a specific prefix. - * @param maxKeys - The number of keys to be returned. This must be below - * the cluster level set by admins. - * @return The list of keys that are deleted from the deleted table. - * @throws IOException - */ - List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException; - - /** - * Recover trash allows the user to recover keys that were marked as deleted, - * but not actually deleted by Ozone Manager. - * @param volumeName - The volume name. - * @param bucketName - The bucket name. - * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. - * @return The result of recovering operation is success or not. - * @throws IOException - */ - default boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - return false; - } - /** * * @param txnApplyWaitTimeoutSeconds Max time in SECONDS to wait for all @@ -1096,7 +1061,7 @@ default boolean recoverTrash(String volumeName, String bucketName, * @param txnApplyCheckIntervalSeconds Time in SECONDS to wait between * successive checks for all transactions * to be applied to the OM DB. 
- * @return + * @return {@code long} */ default long prepareOzoneManager( long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds) @@ -1181,10 +1146,40 @@ boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException; /** - * Get server default configurations. - * - * @return OzoneFsServerDefaults some default configurations from server. + * Gets the tags for the specified key. + * @param args Key args + * @return Tags associated with the key. + */ + Map getObjectTagging(OmKeyArgs args) throws IOException; + + /** + * Sets the tags to an existing key. + * @param args Key args + */ + default void putObjectTagging(OmKeyArgs args) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented, as write requests use a new approach."); + } + + /** + * Removes all the tags from the specified key. + * @param args Key args + */ + default void deleteObjectTagging(OmKeyArgs args) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented, as write requests use a new approach."); + } + + /** + * Get status of last triggered quota repair in OM. + * @return String + * @throws IOException + */ + String getQuotaRepairStatus() throws IOException; + + /** + * start quota repair in OM. * @throws IOException */ - OzoneFsServerDefaults getServerDefaults() throws IOException; + void startQuotaRepair(List buckets) throws IOException; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java index ac2e85da84d..c9eb9cbb44f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java @@ -121,6 +121,7 @@ public GrpcOmTransport(ConfigurationSource conf, omFailoverProxyProvider = new GrpcOMFailoverProxyProvider( conf, + ugi, omServiceId, OzoneManagerProtocolPB.class); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index f70beed5f25..6b23b0f2682 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -41,7 +41,6 @@ import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; @@ -72,7 +71,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -110,6 +108,7 @@ import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteSnapshotRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantResponse; @@ -127,6 +126,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; @@ -150,8 +151,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest; @@ -178,12 +177,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrintCompactionLogDagRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; @@ -198,8 +196,6 @@ import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SafeMode; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; @@ -268,8 +264,8 @@ public final class OzoneManagerProtocolClientSideTranslatorPB = new ThreadLocal<>(); private boolean s3AuthCheck; - public static final int BLOCK_ALLOCATION_RETRY_COUNT = 5; - public static final int BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS = 3000; + public static final int BLOCK_ALLOCATION_RETRY_COUNT = 90; + public static final int BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS = 1000; public OzoneManagerProtocolClientSideTranslatorPB(OmTransport omTransport, String clientId) { @@ -1656,10 +1652,13 @@ public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws .setKeyName(omKeyArgs.getKeyName()) .addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata())) .setOwnerName(omKeyArgs.getOwner()) - .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .addAllTags(KeyValueUtil.toProtobuf(omKeyArgs.getTags())); + if (omKeyArgs.getAcls() != null) { + keyArgs.addAllAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } + setReplicationConfig(omKeyArgs.getReplicationConfig(), keyArgs); multipartInfoInitiateRequest.setKeyArgs(keyArgs.build()); @@ -1730,10 +1729,12 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setVolumeName(omKeyArgs.getVolumeName()) .setBucketName(omKeyArgs.getBucketName()) .setKeyName(omKeyArgs.getKeyName()) - .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .setOwnerName(omKeyArgs.getOwner()) .setMultipartUploadID(omKeyArgs.getMultipartUploadID()); + if (omKeyArgs.getAcls() != null) { + keyArgs.addAllAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } multipartUploadCompleteRequest.setKeyArgs(keyArgs.build()); multipartUploadCompleteRequest.addAllPartsList(multipartUploadList @@ -2122,27 +2123,24 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { .setGetFileStatusRequest(req) .build(); - final GetFileStatusResponse resp; - try { - resp = handleError(submitRequest(omRequest)).getGetFileStatusResponse(); - } catch (IOException e) { - throw e; - } + final GetFileStatusResponse resp = handleError(submitRequest(omRequest)) + .getGetFileStatusResponse(); return OzoneFileStatus.getFromProtobuf(resp.getStatus()); } @Override public void createDirectory(OmKeyArgs args) throws IOException { - KeyArgs keyArgs = KeyArgs.newBuilder() + KeyArgs.Builder keyArgsBuilder = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) - .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) - .setOwnerName(args.getOwner()) - .build(); + .setOwnerName(args.getOwner()); + if (args.getAcls() != null) { + 
keyArgsBuilder.addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } CreateDirectoryRequest request = CreateDirectoryRequest.newBuilder() - .setKeyArgs(keyArgs) + .setKeyArgs(keyArgsBuilder.build()) .build(); OMRequest omRequest = createOMRequest(Type.CreateDirectory) @@ -2304,9 +2302,11 @@ public OpenKeySession createFile(OmKeyArgs args, .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) .setDataSize(args.getDataSize()) - .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .setOwnerName(args.getOwner()); + if (args.getAcls() != null) { + keyArgsBuilder.addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } if (args.getReplicationConfig() != null) { if (args.getReplicationConfig() instanceof ECReplicationConfig) { keyArgsBuilder.setEcReplicationConfig( @@ -2442,85 +2442,6 @@ public List listStatus(OmKeyArgs args, boolean recursive, return listStatus(args, recursive, startKey, numEntries, false); } - @Override - public List listTrash(String volumeName, - String bucketName, String startKeyName, String keyPrefix, int maxKeys) - throws IOException { - - Preconditions.checkArgument(Strings.isNullOrEmpty(volumeName), - "The volume name cannot be null or " + - "empty. Please enter a valid volume name or use '*' as a wild card"); - - Preconditions.checkArgument(Strings.isNullOrEmpty(bucketName), - "The bucket name cannot be null or " + - "empty. Please enter a valid bucket name or use '*' as a wild card"); - - ListTrashRequest trashRequest = ListTrashRequest.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setStartKeyName(startKeyName) - .setKeyPrefix(keyPrefix) - .setMaxKeys(maxKeys) - .build(); - - OMRequest omRequest = createOMRequest(Type.ListTrash) - .setListTrashRequest(trashRequest) - .build(); - - ListTrashResponse trashResponse = - handleError(submitRequest(omRequest)).getListTrashResponse(); - - List deletedKeyList = - new ArrayList<>(trashResponse.getDeletedKeysCount()); - - List list = new ArrayList<>(); - for (OzoneManagerProtocolProtos.RepeatedKeyInfo - repeatedKeyInfo : trashResponse.getDeletedKeysList()) { - RepeatedOmKeyInfo fromProto = - RepeatedOmKeyInfo.getFromProto(repeatedKeyInfo); - list.add(fromProto); - } - deletedKeyList.addAll(list); - - return deletedKeyList; - } - - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - - Preconditions.checkArgument(Strings.isNullOrEmpty(volumeName), - "The volume name cannot be null or empty. " + - "Please enter a valid volume name."); - - Preconditions.checkArgument(Strings.isNullOrEmpty(bucketName), - "The bucket name cannot be null or empty. " + - "Please enter a valid bucket name."); - - Preconditions.checkArgument(Strings.isNullOrEmpty(keyName), - "The key name cannot be null or empty. " + - "Please enter a valid key name."); - - Preconditions.checkArgument(Strings.isNullOrEmpty(destinationBucket), - "The destination bucket name cannot be null or empty. 
" + - "Please enter a valid destination bucket name."); - - RecoverTrashRequest.Builder req = RecoverTrashRequest.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDestinationBucket(destinationBucket); - - OMRequest omRequest = createOMRequest(Type.RecoverTrash) - .setRecoverTrashRequest(req) - .build(); - - RecoverTrashResponse recoverResponse = - handleError(submitRequest(omRequest)).getRecoverTrashResponse(); - - return recoverResponse.getResponse(); - } - @Override public long prepareOzoneManager( long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds) @@ -2648,19 +2569,93 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) } @Override - public OzoneFsServerDefaults getServerDefaults() - throws IOException { - ServerDefaultsRequest serverDefaultsRequest = - ServerDefaultsRequest.newBuilder().build(); + public String getQuotaRepairStatus() throws IOException { + OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest quotaRepairStatusRequest = + OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest.newBuilder() + .build(); + + OMRequest omRequest = createOMRequest(Type.GetQuotaRepairStatus) + .setGetQuotaRepairStatusRequest(quotaRepairStatusRequest).build(); + + OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse quotaRepairStatusResponse + = handleError(submitRequest(omRequest)).getGetQuotaRepairStatusResponse(); + return quotaRepairStatusResponse.getStatus(); + } - OMRequest omRequest = createOMRequest(Type.GetServerDefaults) - .setServerDefaultsRequest(serverDefaultsRequest).build(); + @Override + public void startQuotaRepair(List buckets) throws IOException { + OzoneManagerProtocolProtos.StartQuotaRepairRequest startQuotaRepairRequest = + OzoneManagerProtocolProtos.StartQuotaRepairRequest.newBuilder() + .build(); + OMRequest omRequest = createOMRequest(Type.StartQuotaRepair) + .setStartQuotaRepairRequest(startQuotaRepairRequest).build(); + handleError(submitRequest(omRequest)); + } - ServerDefaultsResponse serverDefaultsResponse = - handleError(submitRequest(omRequest)).getServerDefaultsResponse(); + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .build(); - return OzoneFsServerDefaults.getFromProtobuf( - serverDefaultsResponse.getServerDefaults()); + GetObjectTaggingRequest req = + GetObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.GetObjectTagging) + .setGetObjectTaggingRequest(req) + .build(); + + GetObjectTaggingResponse resp = + handleError(submitRequest(omRequest)).getGetObjectTaggingResponse(); + + return KeyValueUtil.getFromProtobuf(resp.getTagsList()); + } + + @Override + public void putObjectTagging(OmKeyArgs args) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .addAllTags(KeyValueUtil.toProtobuf(args.getTags())) + .build(); + + PutObjectTaggingRequest req = + PutObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.PutObjectTagging) + .setPutObjectTaggingRequest(req) + .build(); + + OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + } + + @Override + public void deleteObjectTagging(OmKeyArgs args) throws IOException { + 
KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .build(); + + DeleteObjectTaggingRequest req = + DeleteObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.DeleteObjectTagging) + .setDeleteObjectTaggingRequest(req) + .build(); + + OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); } private SafeMode toProtoBuf(SafeModeAction action) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java index ccb2080a875..e28c9477f29 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java @@ -242,11 +242,13 @@ public static MD5MD5Crc32FileChecksumProto convert( DataOutputBuffer buf = new DataOutputBuffer(); checksum.write(buf); byte[] bytes = buf.getData(); - DataInputBuffer buffer = new DataInputBuffer(); - buffer.reset(bytes, 0, bytes.length); - int bytesPerCRC = buffer.readInt(); - long crcPerBlock = buffer.readLong(); - buffer.close(); + int bytesPerCRC; + long crcPerBlock; + try (DataInputBuffer buffer = new DataInputBuffer()) { + buffer.reset(bytes, 0, bytes.length); + bytesPerCRC = buffer.readInt(); + crcPerBlock = buffer.readLong(); + } int offset = Integer.BYTES + Long.BYTES; ByteString byteString = ByteString.copyFrom( diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java index 19f3e7c4a25..d4db2689612 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java @@ -23,12 +23,14 @@ import java.io.IOException; import java.time.Instant; import java.util.Arrays; +import java.util.UUID; +import com.google.common.base.Preconditions; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.io.DataInputBuffer; -import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto; @@ -47,7 +49,11 @@ public class OzoneTokenIdentifier extends AbstractDelegationTokenIdentifier { public static final Text KIND_NAME = new Text("OzoneToken"); + @Deprecated + // the certificate id of this OM, deprecated since HDDS-8829 private String omCertSerialId; + // shared secret key id generated by SCM. + private String secretKeyId; private Type tokenType; private String awsAccessId; private String signature; @@ -82,31 +88,6 @@ public Text getKind() { return KIND_NAME; } - /** Instead of relying on proto serialization, this - * provides explicit serialization for OzoneTokenIdentifier. - * @return byte[] - */ - public byte[] toUniqueSerializedKey() { - DataOutputBuffer buf = new DataOutputBuffer(); - try { - super.write(buf); - WritableUtils.writeVInt(buf, getTokenType().getNumber()); - // Set s3 specific fields. 
- if (getTokenType().equals(S3AUTHINFO)) { - WritableUtils.writeString(buf, getAwsAccessId()); - WritableUtils.writeString(buf, getSignature()); - WritableUtils.writeString(buf, getStrToSign()); - } else { - WritableUtils.writeString(buf, getOmCertSerialId()); - WritableUtils.writeString(buf, getOmServiceId()); - } - } catch (java.io.IOException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data ", e); - } - return buf.getData(); - } - /** Instead of relying on proto deserialization, this * provides explicit deserialization for OzoneTokenIdentifier. * @return byte[] @@ -125,20 +106,19 @@ public OzoneTokenIdentifier fromUniqueSerializedKey(byte[] rawData) setStrToSign(WritableUtils.readString(in)); } else { this.tokenType = Type.DELEGATION_TOKEN; - setOmCertSerialId(WritableUtils.readString(in)); + String value = WritableUtils.readString(in); + try { + UUID.fromString(value); + setSecretKeyId(value); + } catch (IllegalArgumentException e) { + setOmCertSerialId(value); + } setOmServiceId(WritableUtils.readString(in)); } return this; } - /** - * Overrides default implementation to write using Protobuf. - * - * @param out output stream - * @throws IOException - */ - @Override - public void write(DataOutput out) throws IOException { + public OMTokenProto toProtoBuf() throws IOException { OMTokenProto.Builder builder = OMTokenProto.newBuilder() .setMaxDate(getMaxDate()) .setType(getTokenType()) @@ -155,14 +135,28 @@ public void write(DataOutput out) throws IOException { .setSignature(getSignature()) .setStrToSign(getStrToSign()); } else { - builder.setOmCertSerialId(getOmCertSerialId()); + if (StringUtils.isNotEmpty(getOmCertSerialId())) { + builder.setOmCertSerialId(getOmCertSerialId()); + } + if (StringUtils.isNotEmpty(getSecretKeyId())) { + builder.setSecretKeyId(getSecretKeyId()); + } if (getOmServiceId() != null) { builder.setOmServiceId(getOmServiceId()); } } + return builder.build(); + } - OMTokenProto token = builder.build(); - out.write(token.toByteArray()); + /** + * Overrides default implementation to write using Protobuf. + * + * @param out output stream + * @throws IOException + */ + @Override + public void write(DataOutput out) throws IOException { + out.write(toProtoBuf().toByteArray()); } /** @@ -183,7 +177,12 @@ public void readFields(DataInput in) throws IOException { setMaxDate(token.getMaxDate()); setSequenceNumber(token.getSequenceNumber()); setMasterKeyId(token.getMasterKeyId()); - setOmCertSerialId(token.getOmCertSerialId()); + if (token.hasOmCertSerialId()) { + setOmCertSerialId(token.getOmCertSerialId()); + } + if (token.hasSecretKeyId()) { + setSecretKeyId(token.getSecretKeyId()); + } // Set s3 specific fields. 
if (getTokenType().equals(S3AUTHINFO)) { @@ -221,7 +220,12 @@ public static OzoneTokenIdentifier readProtoBuf(DataInput in) identifier.setSequenceNumber(token.getSequenceNumber()); identifier.setMasterKeyId(token.getMasterKeyId()); } - identifier.setOmCertSerialId(token.getOmCertSerialId()); + if (token.hasOmCertSerialId()) { + identifier.setOmCertSerialId(token.getOmCertSerialId()); + } + if (token.hasSecretKeyId()) { + identifier.setSecretKeyId(token.getSecretKeyId()); + } identifier.setOmServiceId(token.getOmServiceId()); return identifier; } @@ -264,6 +268,7 @@ public boolean equals(Object obj) { } OzoneTokenIdentifier that = (OzoneTokenIdentifier) obj; return new EqualsBuilder() + .append(getSecretKeyId(), that.getSecretKeyId()) .append(getOmCertSerialId(), that.getOmCertSerialId()) .append(getMaxDate(), that.getMaxDate()) .append(getIssueDate(), that.getIssueDate()) @@ -326,6 +331,18 @@ public String getOmCertSerialId() { public void setOmCertSerialId(String omCertSerialId) { this.omCertSerialId = omCertSerialId; + Preconditions.checkArgument(this.omCertSerialId == null || this.secretKeyId == null, + "omCertSerialId and secretKeyId cannot both be valid"); + } + + public String getSecretKeyId() { + return secretKeyId; + } + + public void setSecretKeyId(String id) { + this.secretKeyId = id; + Preconditions.checkArgument(this.omCertSerialId == null || this.secretKeyId == null, + "omCertSerialId and secretKeyId cannot both be valid"); } public String getOmServiceId() { @@ -383,7 +400,8 @@ public String toString() { .append(", signature=").append(getSignature()) .append(", awsAccessKeyId=").append(getAwsAccessId()) .append(", omServiceId=").append(getOmServiceId()) - .append(", omCertSerialId=").append(getOmCertSerialId()); + .append(", omCertSerialId=").append(getOmCertSerialId()) + .append(", secretKeyId=").append(getSecretKeyId()); return buffer.toString(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java index 1f105a03ad4..abd4cd6f6d2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -19,7 +19,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; /** - * Default implementation for {@link IAccessAuthorizer}. + * No-op implementation for {@link IAccessAuthorizer}, allows everything. 
* */ public class OzoneAccessAuthorizer implements IAccessAuthorizer { @@ -35,4 +35,9 @@ public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) throws OMException { return true; } + + @Override + public boolean isNative() { + return true; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java index 75dee0b8a45..7fbf5a92065 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java @@ -23,6 +23,10 @@ import org.apache.hadoop.hdds.conf.ConfigType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + /** * Ozone ACL config pojo. * */ @@ -40,7 +44,7 @@ public class OzoneAclConfig { private String userDefaultRights; @Config(key = "group.rights", - defaultValue = "ALL", + defaultValue = "READ, LIST", type = ConfigType.STRING, tags = {ConfigTag.OM, ConfigTag.SECURITY}, description = "Default group permissions set for an object in " + @@ -48,18 +52,26 @@ public class OzoneAclConfig { ) private String groupDefaultRights; - public ACLType getUserDefaultRights() { + public ACLType[] getUserDefaultRights() { + List types = new ArrayList(); if (userDefaultRights == null) { - return ACLType.ALL; + types.add(ACLType.ALL); + } else { + String[] array = userDefaultRights.trim().split(","); + Arrays.stream(array).forEach(t -> types.add(ACLType.valueOf(t.trim()))); } - return ACLType.valueOf(userDefaultRights); + return types.toArray(new ACLType[0]); } - public ACLType getGroupDefaultRights() { + public ACLType[] getGroupDefaultRights() { + List types = new ArrayList(); if (groupDefaultRights == null) { - return ACLType.ALL; + types.add(ACLType.READ); + types.add(ACLType.LIST); + } else { + String[] array = groupDefaultRights.trim().split(","); + Arrays.stream(array).forEach(t -> types.add(ACLType.valueOf(t.trim()))); } - return ACLType.valueOf(groupDefaultRights); + return types.toArray(new ACLType[0]); } - } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index ca32c96855d..e1f1f3a8c1e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -24,7 +24,7 @@ /** * Class representing an ozone object. 
- * It can be a volume with non-null volumeName (bucketName=null & name=null) + * It can be a volume with non-null volumeName {@literal (bucketName=null & name=null)} * or a bucket with non-null volumeName and bucketName (name=null) * or a key with non-null volumeName, bucketName and key name * (via getKeyName) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java index a9e89033129..83300d5689a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java @@ -47,6 +47,7 @@ public class SnapshotDiffReportOzone Proto2Codec.get(DiffReportEntryProto.getDefaultInstance()), SnapshotDiffReportOzone::fromProtobufDiffReportEntry, SnapshotDiffReportOzone::toProtobufDiffReportEntry, + DiffReportEntry.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getDiffReportEntryCodec() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java index b5a15db39cd..289fc42b4ed 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java @@ -69,8 +69,6 @@ public static void main(String[] args) { System.out.println( "Source code repository " + OZONE_VERSION_INFO.getUrl() + " -r " + OZONE_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + OZONE_VERSION_INFO.getUser() + " on " - + OZONE_VERSION_INFO.getDate()); System.out.println( "Compiled with protoc " + OZONE_VERSION_INFO.getHadoopProtoc2Version() + ", " + OZONE_VERSION_INFO.getGrpcProtocVersion() + diff --git a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties index 1a6e3b61519..73f02760d6f 100644 --- a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties +++ b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties @@ -19,9 +19,6 @@ version=${declared.ozone.version} release=${ozone.release} revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} url=${version-info.scm.uri} srcChecksum=${version-info.source.md5} hadoopProtoc2Version=${proto2.hadooprpc.protobuf.version} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java index 35a8a95d8d0..a6b5d9c0196 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java @@ -186,8 +186,8 @@ private static List getDefaultAcls() { } OzoneAclConfig aclConfig = newInstanceOf(OzoneAclConfig.class); - IAccessAuthorizer.ACLType userRights = aclConfig.getUserDefaultRights(); - IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights(); + IAccessAuthorizer.ACLType[] userRights = aclConfig.getUserDefaultRights(); + IAccessAuthorizer.ACLType[] groupRights = aclConfig.getGroupDefaultRights(); OzoneAclUtil.addAcl(ozoneAcls, new OzoneAcl(USER, ugi.getUserName(), ACCESS, userRights)); diff 
--git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java index 84ad208cf93..f8363af3751 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneFsUtils.java @@ -18,8 +18,13 @@ package org.apache.hadoop.ozone.om.helpers; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -38,4 +43,29 @@ public void testPaths() { assertFalse(OzoneFSUtils.isValidName("/a:/b")); assertFalse(OzoneFSUtils.isValidName("/a//b")); } + + /** + * In these scenarios below, OzoneFSUtils.canEnableHsync() should return false: + * 1. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = false + * 2. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = true + * 3. ozone.hbase.enhancements.allowed = true, ozone.fs.hsync.enabled = false + *

    + * The only case where OzoneFSUtils.canEnableHsync() would return true: + * 4. ozone.hbase.enhancements.allowed = true, ozone.fs.hsync.enabled = true + */ + @ParameterizedTest + @CsvSource({"false,false,false,false", "false,false,true,false", "false,true,false,false", "true,true,true,false", + "false,false,false,true", "false,false,true,true", "false,true,false,true", "true,true,true,true"}) + void testCanEnableHsync(boolean canEnableHsync, + boolean hbaseEnhancementsEnabled, boolean fsHsyncEnabled, + boolean isClient) { + OzoneConfiguration conf = new OzoneConfiguration(); + final String confKey = isClient ? + "ozone.client.hbase.enhancements.allowed" : + OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; + conf.setBoolean(confKey, hbaseEnhancementsEnabled); + conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, fsHsyncEnabled); + + assertEquals(canEnableHsync, OzoneFSUtils.canEnableHsync(conf, isClient)); + } } diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index a0565d7e890..ba66c5d5272 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -20,15 +20,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-csi - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone CSI service Apache Ozone CSI service jar + false true diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 29c10671d9d..733f0837fda 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -19,14 +19,15 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-datanode Apache Ozone Datanode jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT + false true true diff --git a/hadoop-ozone/dev-support/checks/_build.sh b/hadoop-ozone/dev-support/checks/_build.sh new file mode 100755 index 00000000000..b1f23a9ba8a --- /dev/null +++ b/hadoop-ozone/dev-support/checks/_build.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +: ${OZONE_WITH_COVERAGE:="false"} + +MAVEN_OPTIONS='-V -B -DskipTests -DskipDocs --no-transfer-progress' + +if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then + MAVEN_OPTIONS="${MAVEN_OPTIONS} -Pcoverage" +else + MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" +fi + +export MAVEN_OPTS="-Xmx4096m ${MAVEN_OPTS:-}" +mvn ${MAVEN_OPTIONS} clean "$@" +rc=$? 
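Note on the new shared helper above: _build.sh is intended to be sourced by the individual check scripts rather than executed directly; later in this patch build.sh and repro.sh do exactly that and then hand the captured exit code to _post_process.sh. The following is a minimal, hypothetical sketch (not part of the patch) of a check script following the same convention; the script name, the "example" report directory and the chosen Maven goal are illustrative assumptions, while DIR, REPORT_DIR, REPORT_FILE, rc, ERROR_PATTERN, _build.sh and _post_process.sh are the names introduced or reused by the surrounding changes.

#!/usr/bin/env bash
# Hypothetical check script sketch; follows the conventions of the other
# hadoop-ozone/dev-support/checks/*.sh scripts touched in this patch.

set -u -o pipefail

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../../.." || exit 1

# Report locations; "example" is a placeholder name for this sketch.
REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/example"}
mkdir -p "$REPORT_DIR"
REPORT_FILE="$REPORT_DIR/summary.txt"

# _build.sh sets the shared Maven options (coverage on/off, -DskipTests, ...),
# runs "mvn clean <goals>" and leaves the Maven exit code in $rc without exiting.
source "${DIR}"/_build.sh verify "$@" > "${REPORT_DIR}/output.log" 2>&1

# Extract the check-specific findings into the summary file.
grep -o 'ERROR.*' "${REPORT_DIR}/output.log" > "${REPORT_FILE}"

# _post_process.sh writes the failure count, falls back to grepping
# $ERROR_PATTERN from output.log when Maven failed but the report is empty,
# and exits with the final status.
ERROR_PATTERN="\[ERROR\]"
source "${DIR}/_post_process.sh"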
diff --git a/hadoop-ozone/dev-support/checks/_diffoscope.sh b/hadoop-ozone/dev-support/checks/_diffoscope.sh new file mode 100755 index 00000000000..cc7cc700c82 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/_diffoscope.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Helper script to compare jars reported by maven-artifact-plugin + +set -e -u -o pipefail + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +BASE_DIR="$(pwd -P)" +: ${OUTPUT_LOG:="${BASE_DIR}/target/repro/output.log"} + +for jar in $(grep -o "investigate with diffoscope [^ ]*\.jar [^ ]*\.jar" "${OUTPUT_LOG}" | awk '{ print $NF }'); do + jarname=$(basename "$jar") + if [[ ! -e "$jar" ]]; then + echo "$jar does not exist" + continue + fi + + ref=$(find target/reference -name "$jarname") + if [[ -z "$ref" ]]; then + ref=$(find ~/.m2/repository -name "$jarname") + fi + + if [[ ! -e "$ref" ]]; then + echo "Reference not found for: $jarname" + continue + fi + + diffoscope "$ref" "$jar" +done diff --git a/hadoop-ozone/dev-support/checks/_lib.sh b/hadoop-ozone/dev-support/checks/_lib.sh index 134c8f53c6e..632aecb8296 100644 --- a/hadoop-ozone/dev-support/checks/_lib.sh +++ b/hadoop-ozone/dev-support/checks/_lib.sh @@ -160,7 +160,11 @@ download_hadoop_aws() { if [[ ! -e "${dir}" ]] || [[ ! -d "${dir}"/src/test/resources ]]; then mkdir -p "${dir}" - [[ -f "${dir}.tar.gz" ]] || curl -LSs -o "${dir}.tar.gz" https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz + if [[ ! -f "${dir}.tar.gz" ]]; then + local url="https://www.apache.org/dyn/closer.lua?action=download&filename=hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz" + echo "Downloading Hadoop from ${url}" + curl -LSs --fail -o "${dir}.tar.gz" "$url" || return 1 + fi tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*-src/hadoop-tools/hadoop-aws' || return 1 fi } diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 36205c69bb6..0249c7a498d 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -29,16 +29,20 @@ _realpath() { tempfile="${REPORT_DIR}/summary.tmp" ## generate summary txt file +failures=${REPORT_DIR}/failures.txt find "." 
-not -path '*/iteration*' -name 'TEST*.xml' -print0 \ | xargs -n1 -0 "grep" -l -E " "${tempfile}" + > "${failures}" +cat ${failures} > "${tempfile}" +leaks=${REPORT_DIR}/leaks.txt if [[ "${CHECK:-unit}" == "integration" ]]; then find hadoop-ozone/integration-test -not -path '*/iteration*' -name '*-output.txt' -print0 \ | xargs -n1 -0 "grep" -l -E "not closed properly|was not shutdown properly" \ | awk -F/ '{sub("-output.txt",""); print $NF}' \ - >> "${tempfile}" + > "${leaks}" + cat ${leaks} >> "${tempfile}" fi #Copy heap dump and dump leftovers @@ -50,11 +54,13 @@ find "." -not -path '*/iteration*' \ -exec mv {} "$REPORT_DIR/" \; ## Add the tests where the JVM is crashed +crashes=${REPORT_DIR}/crashes.txt grep -A1 'Crashed tests' "${REPORT_DIR}/output.log" \ | grep -v -e 'Crashed tests' -e '--' \ | cut -f2- -d' ' \ | sort -u \ - >> "${tempfile}" + > "${crashes}" +cat "${crashes}" >> "${tempfile}" # Check for tests that started but were not finished if grep -q 'There was a timeout.*in the fork' "${REPORT_DIR}/output.log"; then @@ -93,20 +99,24 @@ fi ## generate summary markdown file export SUMMARY_FILE="$REPORT_DIR/summary.md" -for TEST_RESULT_FILE in $(find "$REPORT_DIR" -name "*.txt" | grep -v output); do - - FAILURES=$(grep FAILURE "$TEST_RESULT_FILE" | grep "Tests run" | awk '{print $18}' | sort | uniq) +echo -n > "$SUMMARY_FILE" +if [ -s "${failures}" ]; then + printf "# Failed Tests\n\n" >> "$SUMMARY_FILE" + cat "${failures}" | sed 's/^/ * /' >> "$SUMMARY_FILE" +fi +rm -f "${failures}" - for FAILURE in $FAILURES; do - TEST_RESULT_LOCATION="$(_realpath --relative-to="$REPORT_DIR" "$TEST_RESULT_FILE")" - TEST_OUTPUT_LOCATION="${TEST_RESULT_LOCATION//.txt/-output.txt}" - printf " * [%s](%s) ([output](%s))\n" "$FAILURE" "$TEST_RESULT_LOCATION" "$TEST_OUTPUT_LOCATION" >> "$SUMMARY_FILE" - done -done +if [[ -s "${leaks}" ]]; then + printf "# Leaks Detected\n\n" >> "$SUMMARY_FILE" + cat "${leaks}" | sed 's/^/ * /' >> "$SUMMARY_FILE" +fi +rm -f "${leaks}" -if [ -s "$SUMMARY_FILE" ]; then - printf "# Failing tests: \n\n" | cat - "$SUMMARY_FILE" > temp && mv temp "$SUMMARY_FILE" +if [[ -s "${crashes}" ]]; then + printf "# Crashed Tests\n\n" >> "$SUMMARY_FILE" + cat "${crashes}" | sed 's/^/ * /' >> "$SUMMARY_FILE" fi +rm -f "${crashes}" ## generate counter wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures" diff --git a/hadoop-ozone/dev-support/checks/_post_process.sh b/hadoop-ozone/dev-support/checks/_post_process.sh new file mode 100644 index 00000000000..555a281445a --- /dev/null +++ b/hadoop-ozone/dev-support/checks/_post_process.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script does common processing after Maven-based checks. 
+# +# - ensures Maven error is reported as failure +# - writes number of failures into file +# - exits with the correct code + +# Prerequisites: +# - $rc should be set to Maven exit code +# - $REPORT_DIR should be defined +# - $REPORT_FILE should be defined +# - Maven output should be saved in $REPORT_DIR/output.log + +# script failed, but report file is empty (does not reflect failure) +if [[ ${rc} -ne 0 ]] && [[ ! -s "${REPORT_FILE}" ]]; then + # do we know what to look for? + if [[ -n "${ERROR_PATTERN:-}" ]]; then + grep -m25 "${ERROR_PATTERN}" "${REPORT_DIR}/output.log" > "${REPORT_FILE}" + fi + if [[ ! -s "${REPORT_FILE}" ]]; then + echo "Unknown failure, check output.log" > "${REPORT_FILE}" + fi +fi + +# number of failures = number of lines in report, unless file already created with custom count +if [[ ! -s "${REPORT_DIR}/failures" ]]; then + wc -l "$REPORT_FILE" | awk '{ print $1 }' > "$REPORT_DIR/failures" +fi + +# exit with failure if report is not empty +if [[ -s "${REPORT_FILE}" ]]; then + rc=1 +fi + +exit ${rc} diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh index 3425f66605e..ea9fa819ec3 100755 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ b/hadoop-ozone/dev-support/checks/acceptance.sh @@ -30,6 +30,7 @@ OZONE_ROOT=$(pwd -P) source "${DIR}/_lib.sh" REPORT_DIR=${OUTPUT_DIR:-"${OZONE_ROOT}/target/acceptance"} +REPORT_FILE="$REPORT_DIR/summary.txt" OZONE_VERSION=$(mvn help:evaluate -Dexpression=ozone.version -q -DforceStdout -Dscan=false) DIST_DIR="${OZONE_ROOT}/hadoop-ozone/dist/target/ozone-$OZONE_VERSION" @@ -49,14 +50,17 @@ if [[ "${OZONE_ACCEPTANCE_SUITE}" == "s3a" ]]; then export HADOOP_AWS_DIR=${OZONE_ROOT}/target/hadoop-src fi - download_hadoop_aws "${HADOOP_AWS_DIR}" + if ! download_hadoop_aws "${HADOOP_AWS_DIR}"; then + echo "Failed to download Hadoop ${HADOOP_VERSION}" > "${REPORT_FILE}" + exit 1 + fi fi export OZONE_ACCEPTANCE_SUITE OZONE_ACCEPTANCE_TEST_TYPE cd "$DIST_DIR/compose" || exit 1 ./test-all.sh 2>&1 | tee "${REPORT_DIR}/output.log" -RES=$? +rc=$? if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "maven" ]]; then pushd result @@ -64,14 +68,13 @@ if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "maven" ]]; then find . -name junit -print0 | xargs -r -0 rm -frv cp -rv * "${REPORT_DIR}"/ popd + ERROR_PATTERN="\[ERROR\]" else cp -rv result/* "$REPORT_DIR/" - if [[ -f "${REPORT_DIR}/log.html" ]]; then - cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" - fi - grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_DIR}/summary.txt" + grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_FILE}" + ERROR_PATTERN="FAIL" fi find "$REPORT_DIR" -type f -empty -not -name summary.txt -print0 | xargs -0 rm -v -exit $RES +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh index 7c95b5d775b..2764edb2928 100755 --- a/hadoop-ozone/dev-support/checks/author.sh +++ b/hadoop-ozone/dev-support/checks/author.sh @@ -16,6 +16,8 @@ #checks:basic +set -u -o pipefail + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 @@ -23,10 +25,10 @@ REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/author"} mkdir -p "$REPORT_DIR" REPORT_FILE="$REPORT_DIR/summary.txt" -grep -r --include="*.java" "@author" . 
| tee "$REPORT_FILE" - -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 +rc=0 +if grep -r --include="*.java" "@author" . | tee "$REPORT_FILE"; then + rc=1 fi + +ERROR_PATTERN="" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/bats.sh b/hadoop-ozone/dev-support/checks/bats.sh index e4e18957a62..f94ba583ee2 100755 --- a/hadoop-ozone/dev-support/checks/bats.sh +++ b/hadoop-ozone/dev-support/checks/bats.sh @@ -16,6 +16,8 @@ #checks:basic +set -u -o pipefail + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "${DIR}/../../.." || exit 1 @@ -39,11 +41,11 @@ find * \( \ \) -print0 \ | xargs -0 -n1 bats --formatter tap \ | tee -a "${REPORT_DIR}/output.log" +rc=$? grep '^\(not ok\|#\)' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" grep -c '^not ok' "${REPORT_FILE}" > "${REPORT_DIR}/failures" -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi +ERROR_PATTERN="" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh index f9938e70387..ac524f755e3 100755 --- a/hadoop-ozone/dev-support/checks/build.sh +++ b/hadoop-ozone/dev-support/checks/build.sh @@ -13,20 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -: ${OZONE_WITH_COVERAGE:="false"} -MAVEN_OPTIONS='-V -B -DskipTests -DskipDocs --no-transfer-progress' +set -eu -o pipefail -if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then - MAVEN_OPTIONS="${MAVEN_OPTIONS} -Pcoverage" -else - MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" -fi - -export MAVEN_OPTS="-Xmx4096m $MAVEN_OPTS" -echo "${MAVEN_OPTIONS}" -mvn ${MAVEN_OPTIONS} clean install "$@" -exit $? +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +source "${DIR}"/_build.sh install "$@" diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index 18ae3905975..09b24b93a8b 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -24,7 +24,7 @@ REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/checkstyle"} mkdir -p "$REPORT_DIR" REPORT_FILE="$REPORT_DIR/summary.txt" -MAVEN_OPTIONS='-B -fae -Dskip.npx -Dskip.installnpx -Dcheckstyle.failOnViolation=false --no-transfer-progress' +MAVEN_OPTIONS='-B -fae -DskipRecon -Dcheckstyle.failOnViolation=false --no-transfer-progress' declare -i rc mvn ${MAVEN_OPTIONS} checkstyle:check > "${REPORT_DIR}/output.log" @@ -53,15 +53,8 @@ find "." -name checkstyle-errors.xml -print0 \ -e "s/>/>/g" \ | tee "$REPORT_FILE" -# check if Maven failed due to some error other than checkstyle violation -if [[ ${rc} -ne 0 ]] && [[ ! 
-s "${REPORT_FILE}" ]]; then - grep -m1 -F '[ERROR]' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" -fi - ## generate counter grep -c ':' "$REPORT_FILE" > "$REPORT_DIR/failures" -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi -exit ${rc} +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/coverage.sh b/hadoop-ozone/dev-support/checks/coverage.sh index 04961921d96..67161d01a05 100755 --- a/hadoop-ozone/dev-support/checks/coverage.sh +++ b/hadoop-ozone/dev-support/checks/coverage.sh @@ -53,4 +53,5 @@ find target/coverage-classes -type d \( -name proto -or -name proto3 -or -name g | xargs rm -rf #generate the reports -jacoco report "$REPORT_DIR/jacoco-all.exec" --classfiles target/coverage-classes --html "$REPORT_DIR/all" --xml "$REPORT_DIR/all.xml" +src=$(find hadoop-* -path '*/src/main/java' | sed 's/^/--sourcefiles /g' | xargs echo) +jacoco report "$REPORT_DIR/jacoco-all.exec" $src --classfiles target/coverage-classes --html "$REPORT_DIR/all" --xml "$REPORT_DIR/all.xml" diff --git a/hadoop-ozone/dev-support/checks/dependency.sh b/hadoop-ozone/dev-support/checks/dependency.sh index 116664df81b..dc95a25e47c 100755 --- a/hadoop-ozone/dev-support/checks/dependency.sh +++ b/hadoop-ozone/dev-support/checks/dependency.sh @@ -32,8 +32,8 @@ cp ${src_dir}/current.txt "$REPORT_DIR"/ #implementation of sort cli is not exactly the same everywhere. It's better to sort with the same command locally (diff -uw \ - <(sort ${src_dir}/jar-report.txt) \ - <(sort ${src_dir}/current.txt) \ + <(sort -u ${src_dir}/jar-report.txt) \ + <(sort -u ${src_dir}/current.txt) \ || true) \ > "$REPORT_FILE" diff --git a/hadoop-ozone/dev-support/checks/docs.sh b/hadoop-ozone/dev-support/checks/docs.sh index ce80c3f3e5c..7ebf64ef190 100755 --- a/hadoop-ozone/dev-support/checks/docs.sh +++ b/hadoop-ozone/dev-support/checks/docs.sh @@ -33,10 +33,5 @@ rc=$? grep -o 'ERROR.*' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" -wc -l "${REPORT_FILE}" | awk '{ print $1 }' > "${REPORT_DIR}/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi - -exit ${rc} +ERROR_PATTERN="" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh index 8c61c524a9e..7d1565a0195 100755 --- a/hadoop-ozone/dev-support/checks/findbugs.sh +++ b/hadoop-ozone/dev-support/checks/findbugs.sh @@ -16,6 +16,8 @@ #checks:basic +set -u -o pipefail + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 @@ -25,30 +27,25 @@ source "${DIR}/_lib.sh" install_spotbugs -MAVEN_OPTIONS='-B -fae -Dskip.npx -Dskip.installnpx --no-transfer-progress' +REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"} +mkdir -p "$REPORT_DIR" +REPORT_FILE="$REPORT_DIR/summary.txt" + +MAVEN_OPTIONS='-B -fae -DskipRecon --no-transfer-progress' if [[ "${OZONE_WITH_COVERAGE}" != "true" ]]; then MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" fi #shellcheck disable=SC2086 -mvn ${MAVEN_OPTIONS} test-compile spotbugs:spotbugs "$@" +mvn ${MAVEN_OPTIONS} test-compile spotbugs:spotbugs "$@" | tee "${REPORT_DIR}/output.log" rc=$? 
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"} -mkdir -p "$REPORT_DIR" -REPORT_FILE="$REPORT_DIR/summary.txt" - touch "$REPORT_FILE" find hadoop-hdds hadoop-ozone -name spotbugsXml.xml -print0 | xargs -0 unionBugs -output "${REPORT_DIR}"/summary.xml convertXmlToText "${REPORT_DIR}"/summary.xml | tee -a "${REPORT_FILE}" convertXmlToText -html:fancy-hist.xsl "${REPORT_DIR}"/summary.xml "${REPORT_DIR}"/summary.html -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi - -exit ${rc} +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index bb7088f0cd5..46f1fd77f2b 100755 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -30,8 +30,8 @@ if [[ ${ITERATIONS} -le 0 ]]; then ITERATIONS=1 fi -export MAVEN_OPTS="-Xmx4096m $MAVEN_OPTS" -MAVEN_OPTIONS="-B -V -Dskip.npx -Dskip.installnpx -Dnative.lib.tmp.dir=/tmp --no-transfer-progress" +export MAVEN_OPTS="-Xmx4096m ${MAVEN_OPTS:-}" +MAVEN_OPTIONS="-B -V -DskipRecon -Dnative.lib.tmp.dir=/tmp --no-transfer-progress" if [[ "${OZONE_WITH_COVERAGE}" != "true" ]]; then MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" @@ -100,14 +100,10 @@ for i in $(seq 1 ${ITERATIONS}); do fi done -# check if Maven failed due to some error other than test failure -if [[ ${rc} -ne 0 ]] && [[ ! -s "${REPORT_FILE}" ]]; then - grep -m1 -F '[ERROR]' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" -fi - if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then #Archive combined jacoco records mvn -B -N jacoco:merge -Djacoco.destFile=$REPORT_DIR/jacoco-combined.exec -Dscan=false fi -exit ${rc} +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/kubernetes.sh b/hadoop-ozone/dev-support/checks/kubernetes.sh index 4f4f78e6ef1..e9ecfdf5f2a 100755 --- a/hadoop-ozone/dev-support/checks/kubernetes.sh +++ b/hadoop-ozone/dev-support/checks/kubernetes.sh @@ -35,6 +35,7 @@ else fi REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/kubernetes"} +REPORT_FILE="$REPORT_DIR/summary.txt" OZONE_VERSION=$(mvn help:evaluate -Dexpression=ozone.version -q -DforceStdout -Dscan=false) DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION" @@ -48,10 +49,10 @@ mkdir -p "$REPORT_DIR" cd "$DIST_DIR/kubernetes/examples" || exit 1 ./test-all.sh 2>&1 | tee "${REPORT_DIR}/output.log" -RES=$? +rc=$? cp -r result/* "$REPORT_DIR/" -cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" -grep -A1 FAIL "${REPORT_DIR}/output.log" > "${REPORT_DIR}/summary.txt" +grep -A1 FAIL "${REPORT_DIR}/output.log" > "${REPORT_FILE}" -exit $RES +ERROR_PATTERN="FAIL" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/license.sh b/hadoop-ozone/dev-support/checks/license.sh index 90accc85662..673a77e6d4f 100755 --- a/hadoop-ozone/dev-support/checks/license.sh +++ b/hadoop-ozone/dev-support/checks/license.sh @@ -42,7 +42,7 @@ DEFAULT_SRC="target/generated-sources/license/THIRD-PARTY.txt" src="${1:-${DEFAULT_SRC}}" if [[ ! -e ${src} ]]; then - MAVEN_OPTIONS="-B -fae -Dskip.npx -Dskip.installnpx --no-transfer-progress ${MAVEN_OPTIONS:-}" + MAVEN_OPTIONS="-B -fae -DskipRecon --no-transfer-progress ${MAVEN_OPTIONS:-}" mvn ${MAVEN_OPTIONS} license:aggregate-add-third-party | tee "${REPORT_DIR}/output.log" src="${DEFAULT_SRC}" fi @@ -67,9 +67,7 @@ grep '(' ${src} \ || true ) \ | sort -u \ | tee "${REPORT_FILE}" +rc=$? 
-wc -l "${REPORT_FILE}" | awk '{ print $1 }' > "${REPORT_DIR}/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi +ERROR_PATTERN="" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/rat.sh b/hadoop-ozone/dev-support/checks/rat.sh index 2bdb66ba119..3582587f8da 100755 --- a/hadoop-ozone/dev-support/checks/rat.sh +++ b/hadoop-ozone/dev-support/checks/rat.sh @@ -28,9 +28,5 @@ mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:check "$@" grep -r --include=rat.txt "!????" $dirs | tee "$REPORT_FILE" -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi - +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/repro.sh b/hadoop-ozone/dev-support/checks/repro.sh new file mode 100755 index 00000000000..8d3db0fa7e9 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/repro.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This check verifies build reproducibility. + +set -u -o pipefail + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +BASE_DIR="$(pwd -P)" +REPORT_DIR=${OUTPUT_DIR:-"${BASE_DIR}/target/repro"} + +rc=0 +source "${DIR}"/_build.sh verify artifact:compare "$@" | tee output.log + +mkdir -p "$REPORT_DIR" +mv output.log "$REPORT_DIR"/ + +REPORT_FILE="$REPORT_DIR/summary.txt" +grep 'ERROR.*mismatch' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" + +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/sonar.sh b/hadoop-ozone/dev-support/checks/sonar.sh index 27a971f691c..b9948a31d4e 100755 --- a/hadoop-ozone/dev-support/checks/sonar.sh +++ b/hadoop-ozone/dev-support/checks/sonar.sh @@ -24,7 +24,7 @@ if [ ! "$SONAR_TOKEN" ]; then fi -mvn -V -B -DskipShade -DskipTests -Dskip.npx -Dskip.installnpx --no-transfer-progress \ +mvn -V -B -DskipShade -DskipTests -DskipRecon --no-transfer-progress \ -Dsonar.coverage.jacoco.xmlReportPaths="$(pwd)/target/coverage/all.xml" \ -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=apache -Dsonar.projectKey=hadoop-ozone \ verify org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.0.1398:sonar diff --git a/hadoop-ozone/dev-support/k8s/regenerate-examples.sh b/hadoop-ozone/dev-support/k8s/regenerate-examples.sh new file mode 100755 index 00000000000..ffedaa3b725 --- /dev/null +++ b/hadoop-ozone/dev-support/k8s/regenerate-examples.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -u -o pipefail + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +source "hadoop-ozone/dev-support/checks/_lib.sh" + +install_flekszible + +hadoop-ozone/dist/src/main/k8s/examples/regenerate-all.sh diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index 0ec066aca56..a902eab5a97 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -71,6 +71,8 @@ run cp -p "${ROOT}/HISTORY.md" . run cp -p "${ROOT}/SECURITY.md" . run cp -p "${ROOT}/CONTRIBUTING.md" . +run mkdir -p ./share/ozone/classpath +run mkdir -p ./share/ozone/lib run mkdir -p ./share/ozone/web run mkdir -p ./bin run mkdir -p ./sbin @@ -126,8 +128,19 @@ run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/smoketest" . run cp -p -r "${ROOT}/hadoop-ozone/dist/target/k8s" kubernetes run cp -p -r "${ROOT}/hadoop-ozone/dist/target/Dockerfile" . -#Copy pre-generated keytabs -run cp -p -R "${ROOT}/hadoop-ozone/dist/src/main/keytabs" compose/_keytabs +run mkdir compose/_keytabs + +for file in $(find "${ROOT}" -path '*/target/classes/*.classpath' | sort); do + # We need to add the artifact manually as it's not part the generated classpath desciptor + module=$(basename "${file%.classpath}") + sed -i -e "s;$;:\$HDDS_LIB_JARS_DIR/${module}-${HDDS_VERSION}.jar;" "$file" + + cp -n -p -v "$file" share/ozone/classpath/ +done + +for file in $(find "${ROOT}" -path '*/share/ozone/lib/*jar' | sort); do + cp -n -p -v "$file" share/ozone/lib/ +done #workaround for https://issues.apache.org/jira/browse/MRESOURCES-236 find ./compose -name "*.sh" -exec chmod 755 {} \; diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index f2c2ba365c0..9540a5195b7 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -19,17 +19,19 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-dist Apache Ozone Distribution jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true - 20240729-jdk17-1 - apache/ozone-testkrb5:20230318-1 + apache/ozone + -rocky + 20241212-1-jdk21 + ghcr.io/apache/ozone-testkrb5:20241129-1 true @@ -71,24 +73,7 @@ maven-dependency-plugin - copy-classpath-files - prepare-package - - unpack-dependencies - - - - target/ozone-${ozone.version}/share/ozone/classpath - - *.classpath - - hdds-server-scm,ozone-common,ozone-csi,ozone-datanode,ozone-httpfsgateway, - ozone-insight,ozone-manager,ozone-recon,ozone-s3gateway,ozone-tools,hdds-rocks-native,ozone-s3-secret-store - - - - - copy-jars + copy-omitted-jars prepare-package copy-dependencies @@ -98,24 +83,6 @@ runtime - - copy-omitted-jars - prepare-package - - copy - - - target/ozone-${ozone.version}/share/ozone/lib - - - - com.google.protobuf - protobuf-java - ${grpc.protobuf-compile.version} - - - - @@ -199,6 +166,10 @@ org.apache.ozone hdds-container-service + + org.apache.ozone + ozone-recon + 
org.apache.ozone ozone-s3gateway @@ -267,21 +238,6 @@ - - build-with-recon - - - !skipRecon - - - - - - org.apache.ozone - ozone-recon - - - diff --git a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml index 3c97d3add76..517d03926fb 100644 --- a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml +++ b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml @@ -32,28 +32,30 @@ NOTICE.txt / - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt - /licenses - + + hadoop-ozone/dist/src/main/license/src/licenses + /licenses + + LICENSE-*.txt + + + + tools + /tools + + **/* + + true + + **/.classpath + **/.project + **/.settings + **/*.iml + **/target/** + + . @@ -67,6 +69,13 @@ dev-support true + + **/.classpath + **/.project + **/.settings + **/*.iml + **/target/** + hadoop-hdds diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json index 827e2f04e10..72325cba080 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ReadKey Metrics.json @@ -1,4 +1,34 @@ { + "__inputs": [ + { + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "11.1.3" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], "annotations": { "list": [ { @@ -20,1284 +50,1763 @@ "liveNow": false, "panels": [ { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, - "id": 19, - "panels": [ + "id": 2, + "panels": [], + "title": "OM API Metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 49, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + 
"sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no. of keys", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(om_performance_metrics_get_key_info_acl_check_latency_ns_num_ops[1m])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Rate of Key Reads ", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } - }, - "overrides": [] + ] }, - "gridPos": { - "h": 9, - "w": 8, - "x": 0, - "y": 1 + "unit": "ns" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 48, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "timezone": [ + "browser" + ], + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" }, - "id": 8, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" + "editorMode": "builder", + "expr": "om_performance_metrics_get_key_info_read_key_info_latency_ns_avg_time", + "instant": false, + "legendFormat": "{{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "Read Key Info Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_estimate_num_keys", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_estimate_num_keys", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_keytable_estimate_num_keys", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false - } - ], - "title": "Rocksdb metrics (no. of keys)", - "type": "timeseries" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 53, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "cache used", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(om_performance_metrics_get_key_info_acl_check_latency_ns_num_ops[1m])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Rate of Get Key Info ACL Checks", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } - }, - "overrides": [] + ] }, - "gridPos": { - "h": 9, - "w": 8, - "x": 8, - "y": 1 + "unit": "ns" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 52, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" }, - "id": 7, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" + "editorMode": "code", + "expr": "om_performance_metrics_get_key_info_acl_check_latency_ns_avg_time", + "instant": false, + "legendFormat": "{{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "Get Key Info ACL check latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_block_cache_usage", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_block_cache_usage", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Rocksdb block cache usage metrics", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no. 
of files", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 16, - "y": 1 - }, - "id": 13, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_num_files_at_level0", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_keytable_num_files_at_level0", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_s3secrettable_num_files_at_level0", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false - } - ], - "title": "Rocksdb level0 metrics (num files)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no. 
of keys", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 0, - "y": 10 - }, - "id": 6, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rdb_metrics_num_db_key_get_if_exist_checks{instance=~\".*:9875\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rdb_metrics_num_db_key_get_if_exist_gets{instance=~\".*:9875\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Rocksdb no. of db key metrics", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 8, - "y": 10 - }, - "id": 10, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_cur_size_active_mem_table", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"rocksdb_om_db_filetable_cur_size_all_mem_tables", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_size_all_mem_tables", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false - } - ], - "title": "Rocksdb mem table metrics (size)", - "type": "timeseries" + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 51, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 8, - "x": 16, - "y": 10 - }, - "id": 11, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(om_performance_metrics_check_access_latency_ns_num_ops[1m])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Rate of Check Access", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_estimate_table_readers_mem", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - 
"useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_estimate_table_readers_mem", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_keytable_estimate_table_readers_mem", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false - } - ], - "title": "Rocksdb om db table readers mem metrics", - "type": "timeseries" + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 50, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "editorMode": "code", + "expr": "om_performance_metrics_check_access_latency_ns_avg_time", + "instant": false, + "legendFormat": "{{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "OM Check Access Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } - }, - "overrides": [] + ] }, - "gridPos": { - "h": 9, - "w": 8, - "x": 0, - "y": 19 + "unit": "ns" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 55, + "options": { + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" }, - "id": 12, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(om_performance_metrics_get_key_info_resolve_bucket_latency_ns_num_ops[1m])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Resolve Bucket Latency rate", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_filetable_live_sst_files_size", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_keytable_live_sst_files_size", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rocksdb_om_db_s3secrettable_live_sst_files_size", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "C", - "useBackend": false + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 25 + }, + "id": 54, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "om_performance_metrics_get_key_info_resolve_bucket_latency_ns_avg_time", + "instant": false, + "legendFormat": "{{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "Resolve Bucket Latency for Get Key Info", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } - ], - "title": "Rocksdb live sst file size metrics", - "type": "timeseries" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 33 + }, + "id": 56, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "{__name__=~\"om_lock.*avg.*\"}", + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A" } ], - "title": "OM Rocksdb Metrics", - "type": "row" + "title": "OM Locking Metrics", + "type": "timeseries" }, { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 1 + "y": 41 }, - "id": 20, - "panels": [ + "id": 19, + "panels": [], + "title": "OM Rocksdb Metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no. of keys", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 42 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no. 
of ops", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_estimate_num_keys", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 2 + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_estimate_num_keys", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" }, - "id": 16, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_keytable_estimate_num_keys", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Rocksdb metrics (no. 
of keys)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "cache used", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "ugi_metrics_get_groups_num_ops{servername=\"ozoneManager\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 42 + }, + "id": 7, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_block_cache_usage", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_block_cache_usage", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Rocksdb block cache usage metrics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no. of files", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } - ], - "title": "Ugi Metrics (no. 
of ops)", - "type": "timeseries" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 42 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "time (ns)", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ns" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 2 + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_num_files_at_level0", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" }, - "id": 15, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_keytable_num_files_at_level0", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "ugi_metrics_get_groups_avg_time{servername=\"ozoneManager\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Ugi Metrics (avg. time)", - "type": "timeseries" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_s3secrettable_num_files_at_level0", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false } ], - "title": "OM Ugi Metrics", - "type": "row" + "title": "Rocksdb level0 metrics (num files)", + "type": "timeseries" }, { - "collapsed": true, + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "no. 
of keys", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, "gridPos": { - "h": 1, - "w": 24, + "h": 9, + "w": 8, "x": 0, - "y": 2 + "y": 51 }, - "id": 2, - "panels": [ + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no of keys", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "disableTextWrap": false, + "editorMode": "code", + "expr": "rdb_metrics_num_db_key_get_if_exist_checks{instance=~\".*:9875\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rdb_metrics_num_db_key_get_if_exist_gets{instance=~\".*:9875\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Rocksdb no. 
of db key metrics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } - }, - "overrides": [] + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 51 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 163 + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_cur_size_active_mem_table", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" }, - "id": 4, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_cur_size_all_mem_tables", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_size_all_mem_tables", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Rocksdb mem table metrics (size)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "om_metrics_num_keys", - 
"fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 51 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_estimate_table_readers_mem", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_estimate_table_readers_mem", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_keytable_estimate_table_readers_mem", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Rocksdb om db table readers mem metrics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" } - ], - "title": "OM num key metrics", - "type": "timeseries" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 60 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "no. 
of ops", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 163 + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_filetable_live_sst_files_size", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" }, - "id": 5, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_keytable_live_sst_files_size", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "om_metrics_num_key_ops", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "OM num key ops metrics", - "type": "timeseries" + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rocksdb_om_db_s3secrettable_live_sst_files_size", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}", + "range": true, + "refId": "C", + "useBackend": false } ], - "title": "OM Num Key Metrics", - "type": "row" + "title": "Rocksdb live sst file size metrics", + "type": "timeseries" }, { "collapsed": true, @@ -1305,9 +1814,9 @@ "h": 1, "w": 24, "x": 0, - "y": 3 + "y": 69 }, - "id": 21, + "id": 20, "panels": [ { "datasource": { @@ -1319,9 +1828,10 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", - "axisLabel": "", + "axisLabel": "no. 
of ops", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", @@ -1369,9 +1879,9 @@ "h": 8, "w": 8, "x": 0, - "y": 164 + "y": 83 }, - "id": 1, + "id": 16, "options": { "legend": { "calcs": [], @@ -1387,13 +1897,13 @@ "targets": [ { "datasource": { - "type": "prometheus" + "ype": "prometheus" }, "disableTextWrap": false, "editorMode": "builder", - "expr": "om_metrics_num_get_service_lists", + "expr": "rate(ugi_metrics_get_groups_num_ops{servername=\"ozoneManager\"}[1m])", "fullMetaSearch": false, - "includeNullMetadata": true, + "includeNullMetadata": false, "instant": false, "legendFormat": "{{__name__}}, {{hostname}}", "range": true, @@ -1401,23 +1911,9 @@ "useBackend": false } ], - "title": "Get service lists metrics", + "title": "Ugi Metrics (no. of ops)", "type": "timeseries" - } - ], - "title": "OM Service Lists Metrics", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 4 - }, - "id": 22, - "panels": [ + }, { "datasource": { "type": "prometheus" @@ -1428,6 +1924,7 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "time (ns)", @@ -1478,10 +1975,10 @@ "gridPos": { "h": 8, "w": 8, - "x": 0, - "y": 5 + "x": 8, + "y": 83 }, - "id": 3, + "id": 15, "options": { "legend": { "calcs": [], @@ -1501,7 +1998,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "om_performance_metrics_get_key_info_read_key_info_latency_ns_avg_time", + "expr": "ugi_metrics_get_groups_avg_time{servername=\"ozoneManager\"}", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -1511,137 +2008,138 @@ "useBackend": false } ], - "title": "Read key info (avg time) metrics", + "title": "Ugi Metrics (avg. 
time)", "type": "timeseries" } ], - "title": "OM Read Key Info Metrics", + "title": "OM Ugi Metrics", "type": "row" }, { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 5 + "y": 70 }, "id": 23, - "panels": [ + "panels": [], + "title": "OM Table Cache Metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 71 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus" }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 174 - }, - "id": 14, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "disableTextWrap": false, + "editorMode": "code", + "expr": "table_cache_metrics_hit_count{instance=~\".*:9875|.+9876\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus" }, - "targets": [ - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "table_cache_metrics_hit_count{instance=~\".*:9875\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": 
"table_cache_metrics_miss_count{instance=~\".*:9875\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}", - "range": true, - "refId": "B", - "useBackend": false - } - ], - "title": "Table cache metrics (count)", - "type": "timeseries" + "disableTextWrap": false, + "editorMode": "code", + "expr": "table_cache_metrics_miss_count{instance=~\".*:9875|.+9876\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}, {{hostname}}, {{tablename}}", + "range": true, + "refId": "B", + "useBackend": false } ], - "title": "OM Table Cache Metrics", - "type": "row" + "title": "Table cache metrics (count)", + "type": "timeseries" }, { "collapsed": true, @@ -1649,7 +2147,7 @@ "h": 1, "w": 24, "x": 0, - "y": 6 + "y": 79 }, "id": 9, "panels": [ @@ -1714,7 +2212,7 @@ "h": 9, "w": 8, "x": 0, - "y": 47 + "y": 111 }, "id": 17, "options": { @@ -1857,7 +2355,7 @@ "h": 9, "w": 8, "x": 8, - "y": 47 + "y": 111 }, "id": 18, "options": { @@ -1950,7 +2448,7 @@ "h": 1, "w": 24, "x": 0, - "y": 7 + "y": 80 }, "id": 24, "panels": [ @@ -2014,7 +2512,7 @@ "h": 9, "w": 8, "x": 0, - "y": 146 + "y": 210 }, "id": 26, "options": { @@ -2125,7 +2623,7 @@ "h": 9, "w": 8, "x": 8, - "y": 146 + "y": 210 }, "id": 27, "options": { @@ -2170,7 +2668,7 @@ "h": 1, "w": 24, "x": 0, - "y": 8 + "y": 81 }, "id": 25, "panels": [ @@ -2234,7 +2732,7 @@ "h": 9, "w": 8, "x": 0, - "y": 138 + "y": 202 }, "id": 30, "options": { @@ -2279,7 +2777,7 @@ "h": 1, "w": 24, "x": 0, - "y": 10 + "y": 82 }, "id": 29, "panels": [ @@ -2343,7 +2841,7 @@ "h": 8, "w": 8, "x": 0, - "y": 43 + "y": 107 }, "id": 36, "options": { @@ -2438,7 +2936,7 @@ "h": 8, "w": 8, "x": 8, - "y": 43 + "y": 107 }, "id": 37, "options": { @@ -2483,7 +2981,7 @@ "h": 1, "w": 24, "x": 0, - "y": 11 + "y": 83 }, "id": 38, "panels": [ @@ -2497,6 +2995,7 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "time (ns)", @@ -2548,7 +3047,7 @@ "h": 8, "w": 8, "x": 0, - "y": 44 + "y": 73 }, "id": 39, "options": { @@ -2609,6 +3108,7 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "no. 
of ops", @@ -2659,7 +3159,7 @@ "h": 8, "w": 8, "x": 8, - "y": 44 + "y": 73 }, "id": 40, "options": { @@ -2704,7 +3204,7 @@ "h": 1, "w": 24, "x": 0, - "y": 12 + "y": 84 }, "id": 42, "panels": [ @@ -2768,7 +3268,7 @@ "h": 8, "w": 8, "x": 0, - "y": 21 + "y": 85 }, "id": 41, "options": { @@ -2863,7 +3363,7 @@ "h": 8, "w": 8, "x": 8, - "y": 21 + "y": 85 }, "id": 43, "options": { @@ -2958,7 +3458,7 @@ "h": 8, "w": 8, "x": 16, - "y": 21 + "y": 85 }, "id": 44, "options": { @@ -3003,7 +3503,7 @@ "h": 1, "w": 24, "x": 0, - "y": 13 + "y": 85 }, "id": 45, "panels": [ @@ -3067,7 +3567,7 @@ "h": 8, "w": 8, "x": 0, - "y": 14 + "y": 78 }, "id": 46, "options": { @@ -3162,7 +3662,7 @@ "h": 8, "w": 8, "x": 8, - "y": 14 + "y": 78 }, "id": 47, "options": { @@ -3203,8 +3703,7 @@ } ], "refresh": "", - "schemaVersion": 38, - "style": "dark", + "schemaVersion": 39, "tags": [], "templating": { "list": [] @@ -3216,6 +3715,7 @@ "timepicker": {}, "timezone": "", "title": "Read Key Dashboard", - "version": 21, + "uid": "edu3g1mx0be2oc", + "version": 29, "weekStart": "" } \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/common/hadoop-secure.yaml b/hadoop-ozone/dist/src/main/compose/common/hadoop-secure.yaml index 8600659b786..ee97525fbb0 100644 --- a/hadoop-ozone/dist/src/main/compose/common/hadoop-secure.yaml +++ b/hadoop-ozone/dist/src/main/compose/common/hadoop-secure.yaml @@ -16,7 +16,7 @@ services: rm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} hostname: rm dns_search: . volumes: @@ -35,7 +35,7 @@ services: profiles: - hadoop nm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} hostname: nm dns_search: . volumes: @@ -54,7 +54,7 @@ services: profiles: - hadoop jhs: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} container_name: jhs hostname: jhs dns_search: . 
diff --git a/hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh b/hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh index 4b66baca422..c16f6fe22f5 100755 --- a/hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh @@ -21,8 +21,18 @@ if [[ ${SECURITY_ENABLED} == "true" ]]; then fi export COMPOSE_FILE="${COMPOSE_FILE:-docker-compose.yaml}":../common/${extra_compose_file} +: ${HADOOP_IMAGE:="apache/hadoop"} +: ${HADOOP_TEST_IMAGES:=""} + +if [[ -z "${HADOOP_TEST_IMAGES}" ]]; then + # hadoop2 and flokkr images are only available from Docker Hub + HADOOP_TEST_IMAGES="${HADOOP_TEST_IMAGES} apache/hadoop:${hadoop2.version}" + HADOOP_TEST_IMAGES="${HADOOP_TEST_IMAGES} flokkr/hadoop:3.1.2" + HADOOP_TEST_IMAGES="${HADOOP_TEST_IMAGES} ${HADOOP_IMAGE}:${hadoop.version}" +fi + export HADOOP_MAJOR_VERSION=3 -export HADOOP_VERSION=unused # will be set for each test version below +export HADOOP_TEST_IMAGE="${HADOOP_IMAGE}:${hadoop.version}" export OZONE_REPLICATION_FACTOR=3 # shellcheck source=/dev/null @@ -42,14 +52,10 @@ export OZONE_DIR=/opt/ozone # shellcheck source=/dev/null source "$COMPOSE_DIR/../testlib.sh" -for HADOOP_VERSION in ${hadoop2.version} 3.1.2 ${hadoop.version}; do - export HADOOP_VERSION - export HADOOP_MAJOR_VERSION=${HADOOP_VERSION%%.*} - if [[ "${HADOOP_VERSION}" == "${hadoop2.version}" ]] || [[ "${HADOOP_VERSION}" == "${hadoop.version}" ]]; then - export HADOOP_IMAGE=apache/hadoop - else - export HADOOP_IMAGE=flokkr/hadoop - fi +for HADOOP_TEST_IMAGE in $HADOOP_TEST_IMAGES; do + export HADOOP_TEST_IMAGE + hadoop_version="${HADOOP_TEST_IMAGE##*:}" + export HADOOP_MAJOR_VERSION=${hadoop_version%%.*} docker-compose --ansi never --profile hadoop up -d nm rm @@ -60,10 +66,10 @@ for HADOOP_VERSION in ${hadoop2.version} 3.1.2 ${hadoop.version}; do fi for scheme in o3fs ofs; do - execute_robot_test rm -v "SCHEME:${scheme}" -N "hadoop-${HADOOP_VERSION}-hadoopfs-${scheme}" ozonefs/hadoopo3fs.robot + execute_robot_test rm -v "SCHEME:${scheme}" -N "hadoop-${hadoop_version}-hadoopfs-${scheme}" ozonefs/hadoopo3fs.robot # TODO secure MapReduce test is failing with 2.7 due to some token problem if [[ ${SECURITY_ENABLED} != "true" ]] || [[ ${HADOOP_MAJOR_VERSION} == "3" ]]; then - execute_robot_test rm -v "SCHEME:${scheme}" -N "hadoop-${HADOOP_VERSION}-mapreduce-${scheme}" mapreduce.robot + execute_robot_test rm -v "SCHEME:${scheme}" -N "hadoop-${hadoop_version}-mapreduce-${scheme}" mapreduce.robot fi done diff --git a/hadoop-ozone/dist/src/main/compose/common/hadoop.conf b/hadoop-ozone/dist/src/main/compose/common/hadoop.conf index 2a82c0bfa2f..ad2d4cb42be 100644 --- a/hadoop-ozone/dist/src/main/compose/common/hadoop.conf +++ b/hadoop-ozone/dist/src/main/compose/common/hadoop.conf @@ -18,9 +18,6 @@ CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs CORE-SITE.xml_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs MAPRED-SITE.XML_mapreduce.framework.name=yarn -MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME MAPRED-SITE.XML_mapreduce.map.memory.mb=4096 MAPRED-SITE.XML_mapreduce.reduce.memory.mb=4096 MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2g diff --git a/hadoop-ozone/dist/src/main/compose/common/hadoop.yaml b/hadoop-ozone/dist/src/main/compose/common/hadoop.yaml index 4fb56e5aa98..c5899eb6b4f 100644 --- 
a/hadoop-ozone/dist/src/main/compose/common/hadoop.yaml +++ b/hadoop-ozone/dist/src/main/compose/common/hadoop.yaml @@ -16,7 +16,7 @@ services: rm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} hostname: rm volumes: - ../..:/opt/ozone @@ -31,7 +31,7 @@ services: profiles: - hadoop nm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} hostname: nm volumes: - ../..:/opt/ozone diff --git a/hadoop-ozone/dist/src/main/compose/common/init-kdc.sh b/hadoop-ozone/dist/src/main/compose/common/init-kdc.sh new file mode 100755 index 00000000000..97f532cfed7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/common/init-kdc.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +# This script exports keytabs and starts KDC server. + +export_keytab() { + kadmin.local -q "addprinc -randkey $1@EXAMPLE.COM" + kadmin.local -q "ktadd -norandkey -k /etc/security/keytabs/$2.keytab $1@EXAMPLE.COM" +} + +rm -f /etc/security/keytabs/*.keytab + +export_keytab scm/scm scm +export_keytab HTTP/scm scm +export_keytab testuser/scm scm +export_keytab testuser2/scm scm + +export_keytab testuser/dn testuser +export_keytab testuser/httpfs testuser +export_keytab testuser/om testuser +export_keytab testuser/recon testuser +export_keytab testuser/s3g testuser +export_keytab testuser/scm testuser + +export_keytab testuser2/dn testuser2 +export_keytab testuser2/httpfs testuser2 +export_keytab testuser2/om testuser2 +export_keytab testuser2/recon testuser2 +export_keytab testuser2/s3g testuser2 +export_keytab testuser2/scm testuser2 + +export_keytab om/om om +export_keytab HTTP/om om +export_keytab testuser/om om +export_keytab testuser2/om om + +export_keytab s3g/s3g s3g +export_keytab HTTP/s3g s3g +export_keytab testuser/s3g s3g +export_keytab testuser2/s3g s3g + +export_keytab httpfs/httpfs httpfs +export_keytab HTTP/httpfs httpfs +export_keytab testuser/httpfs httpfs +export_keytab testuser2/httpfs httpfs + +export_keytab recon/recon recon +export_keytab HTTP/recon recon +export_keytab testuser/recon recon +export_keytab testuser2/recon recon + +export_keytab dn/dn dn +export_keytab HTTP/dn dn +export_keytab testuser/dn dn +export_keytab testuser2/dn dn + +export_keytab HTTP/scm HTTP +export_keytab HTTP/s3g HTTP +export_keytab HTTP/httpfs HTTP +export_keytab HTTP/ozone HTTP + +export_keytab hadoop/rm hadoop + +export_keytab rm/rm rm +export_keytab nm/nm nm +export_keytab jhs/jhs jhs + +chmod 755 /etc/security/keytabs/*.keytab +chown 1000. 
/etc/security/keytabs/*.keytab + +krb5kdc -n diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh index 554b22b5a39..03600616a76 100644 --- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh @@ -93,11 +93,13 @@ execute_s3a_tests() { EOF # Some tests are skipped due to known issues. + # - ITestS3AContractBulkDelete: HDDS-11661 + # - ITestS3AContractCreate: HDDS-11663 # - ITestS3AContractDistCp: HDDS-10616 - # - ITestS3AContractGetFileStatusV1List: HDDS-10617 + # - ITestS3AContractMkdirWithCreatePerf: HDDS-11662 # - ITestS3AContractRename: HDDS-10665 - mvn -B -V --fail-never --no-transfer-progress \ - -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractRename' \ + mvn ${MAVEN_ARGS:-} --fail-never \ + -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractBulkDelete, !ITestS3AContractCreate#testOverwrite*EmptyDirectory[*], !ITestS3AContractDistCp, !ITestS3AContractMkdirWithCreatePerf, !ITestS3AContractRename' \ clean test local target="${RESULT_DIR}/junit/${bucket}/target" diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config index a5727d2b1e4..f7f1c24b8a0 100644 --- a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config +++ b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config @@ -21,7 +21,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon @@ -31,6 +31,7 @@ OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB OZONE-SITE.XML_ozone.recon.address=recon:9891 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http +OZONE-SITE.XML_ozone.om.features.disabled=ATOMIC_REWRITE_KEY HADOOP_OPTS="-Dhadoop.opts=test" HDFS_STORAGECONTAINERMANAGER_OPTS="-Dhdfs.scm.opts=test" diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/test.sh b/hadoop-ozone/dist/src/main/compose/compatibility/test.sh index ee83c8ac330..5b9d757ee26 100755 --- a/hadoop-ozone/dist/src/main/compose/compatibility/test.sh +++ b/hadoop-ozone/dist/src/main/compose/compatibility/test.sh @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-#suite:compat +#suite:misc COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export COMPOSE_DIR diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml index 5220d71669d..e2d7272b030 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml @@ -39,6 +39,8 @@ services: volumes: - tmpfs1:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE1_REPLICA:-1} datanode2: <<: *common-config ports: @@ -50,6 +52,8 @@ services: volumes: - tmpfs2:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE2_REPLICA:-1} datanode3: <<: *common-config ports: @@ -61,6 +65,8 @@ services: volumes: - tmpfs3:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE3_REPLICA:-1} datanode4: <<: *common-config ports: @@ -72,6 +78,34 @@ services: volumes: - tmpfs4:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE4_REPLICA:-1} + datanode5: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs5:/data + - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE5_REPLICA:-1} + datanode6: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs6:/data + - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE6_REPLICA:-1} om1: <<: *common-config environment: @@ -175,3 +209,15 @@ volumes: o: "size=1g,uid=4000" device: tmpfs type: tmpfs + tmpfs5: + driver: local + driver_opts: + o: "size=1g,uid=5000" + device: tmpfs + type: tmpfs + tmpfs6: + driver: local + driver_opts: + o: "size=1g,uid=6000" + device: tmpfs + type: tmpfs \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config index 29984d43662..6e0781a1d9e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config @@ -34,7 +34,7 @@ OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 OZONE-SITE.XML_ozone.scm.ratis.enable=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=100MB OZONE-SITE.XML_ozone.scm.block.size=20MB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB @@ -43,6 +43,7 @@ OZONE-SITE.XML_hdds.node.report.interval=20s OZONE-SITE.XML_hdds.heartbeat.interval=20s OZONE-SITE.XML_hdds.datanode.du.refresh.period=20s OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +OZONE-SITE.XML_hdds.datanode.container.db.dir=/data/metadata OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB OZONE-SITE.XML_ozone.scm.pipeline.creation.auto.factor.one=false OZONE-SITE.XML_ozone.datanode.pipeline.limit=1 @@ -53,7 +54,8 @@ OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=true OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http - +OZONE-SITE.XML_hdds.container.balancer.balancing.iteration.interval=25s +OZONE-SITE.XML_hdds.container.balancer.trigger.du.before.move.enable=false OZONE_CONF_DIR=/etc/hadoop OZONE_LOG_DIR=/var/log/hadoop diff --git 
a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh new file mode 100644 index 00000000000..bc4bf6c6661 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:balancer + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR +export OM_SERVICE_ID="om" +export OM=om1 +export SCM=scm1 +export OZONE_REPLICATION_FACTOR=3 + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env +execute_robot_test ${OM} -v REPLICATION:rs-3-2-1024k -v TYPE:EC -v LOWER_LIMIT:0.7 -v UPPER_LIMIT:1.5 -N ozone-balancer-EC balancer/testBalancer.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh similarity index 83% rename from hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh rename to hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh index e79979877ba..2c5091c64f5 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh @@ -24,10 +24,12 @@ export OM=om1 export SCM=scm1 export OZONE_REPLICATION_FACTOR=3 +export DATANODE2_REPLICA=0 +export DATANODE5_REPLICA=0 + # shellcheck source=/dev/null source "$COMPOSE_DIR/../testlib.sh" -# We need 4 dataNodes in this tests -start_docker_env 4 +start_docker_env -execute_robot_test ${OM} balancer/testBalancer.robot +execute_robot_test ${OM} -v REPLICATION:THREE -v TYPE:RATIS -v LOWER_LIMIT:3 -v UPPER_LIMIT:3.5 -N ozone-balancer-RATIS balancer/testBalancer.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config index 623f9595583..ba4d80a9d05 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env index 2de359fc5db..6507664fad7 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env +++ 
b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env @@ -15,6 +15,7 @@ # limitations under the License. HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner OZONE_OPTS= diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config index 08c490ea51f..ebf2ce532bd 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -34,7 +34,7 @@ OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 OZONE-SITE.XML_ozone.scm.ratis.enable=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config index 65834455eaa..ae2fb092be6 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config @@ -23,7 +23,7 @@ OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config index 79d2e5285fb..f0ec8fcaa1a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config @@ -24,7 +24,7 @@ OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config index 8239aad2a5d..59b1fcf8cab 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config @@ -24,7 +24,7 @@ OZONE-SITE.XML_ozone.ozone.scm.block.size=64MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config index a657f22340e..f2a9e044793 100644 --- 
a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config @@ -29,7 +29,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config index 06696a0e413..87b0cb50537 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config @@ -19,7 +19,7 @@ CORE-SITE.XML_fs.defaultFS=ofs://om OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config index 66f4cf151ec..adfaeb287d0 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config @@ -17,7 +17,7 @@ CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.block.client.address=scm diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env index 5f3e96ed617..c3a2c5329aa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env @@ -15,6 +15,7 @@ # limitations under the License. HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop HADOOP_VERSION=${hadoop.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml index d55d5e0e2e8..e48d3cb9b05 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml @@ -22,12 +22,12 @@ services: volumes: - ../..:/opt/hadoop - ../_keytabs:/etc/security/keytabs - command: ["krb5kdc","-n"] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] networks: ozone_net: ipv4_address: 172.25.0.100 kms: - image: apache/hadoop:${HADOOP_VERSION} + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} dns_search: . 
ports: - 9600:9600 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index db517a7f7c6..1495e89813a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -33,6 +33,7 @@ OZONE-SITE.XML_ozone.om.http-address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice +OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1.org OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2.org @@ -46,7 +47,7 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env index 34706093171..c3a2c5329aa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env @@ -15,6 +15,8 @@ # limitations under the License. HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop +HADOOP_VERSION=${hadoop.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image} diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml index 625aac77723..4db7576bd22 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml @@ -22,9 +22,9 @@ services: volumes: - ../..:/opt/hadoop - ../_keytabs:/etc/security/keytabs - command: ["krb5kdc","-n"] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] kms: - image: apache/hadoop:3 + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} dns_search: . ports: - 9600:9600 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index 12a7819d1ad..2a58ffcf384 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -22,7 +22,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env index 5f3e96ed617..c3a2c5329aa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env @@ -15,6 +15,7 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop HADOOP_VERSION=${hadoop.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml index 39d26c362f6..f3e372964bb 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml @@ -22,9 +22,9 @@ services: volumes: - ../..:/opt/hadoop - ../_keytabs:/etc/security/keytabs - command: ["krb5kdc","-n"] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] kms: - image: apache/hadoop:${HADOOP_VERSION} + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} hostname: kms dns_search: . ports: @@ -96,7 +96,7 @@ services: - 9878:9878 env_file: - ./docker-config - command: ["/opt/hadoop/bin/ozone","s3g"] + command: ["/opt/hadoop/bin/ozone","s3g", "-Dozone.om.transport.class=${OZONE_S3_OM_TRANSPORT:-org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransportFactory}"] environment: OZONE_OPTS: recon: diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index 4f13d624969..387a1c8517e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh index 644e45c4d5a..a9e87a60cdd 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh @@ -25,6 +25,7 @@ source "$COMPOSE_DIR/../testlib.sh" export SECURITY_ENABLED=true export COMPOSE_FILE=docker-compose.yaml:fcq.yaml +export OZONE_S3_OM_TRANSPORT="org.apache.hadoop.ozone.om.protocolPB.Hadoop3OmTransportFactory" start_docker_env diff --git a/hadoop-ozone/dist/src/main/compose/restart/docker-config b/hadoop-ozone/dist/src/main/compose/restart/docker-config index 161af7a2975..852eb6647c3 100644 --- a/hadoop-ozone/dist/src/main/compose/restart/docker-config +++ b/hadoop-ozone/dist/src/main/compose/restart/docker-config @@ -21,7 +21,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index cb76257cd8d..863e1d0b75a 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -33,7 +33,7 @@ source "$SCRIPT_DIR"/testlib.sh if [[ "${OZONE_WITH_COVERAGE}" 
== "true" ]]; then java -cp "$PROJECT_DIR"/share/coverage/$(ls "$PROJECT_DIR"/share/coverage | grep test-util):"$PROJECT_DIR"/share/coverage/jacoco-core.jar org.apache.ozone.test.JacocoServer & DOCKER_BRIDGE_IP=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}') - export OZONE_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*" + export OZONE_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*:org.apache.ozone.*:org.hadoop.ozone.*" fi cd "$SCRIPT_DIR" diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 1e9cc85781a..8ced94e5007 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -30,8 +30,29 @@ fi source ${_testlib_dir}/compose_v2_compatibility.sh +: ${OZONE_COMPOSE_RUNNING:=false} : ${SCM:=scm} +# create temp directory for test data; only once, even if testlib.sh is sourced again +if [[ -z "${TEST_DATA_DIR:-}" ]] && [[ "${KEEP_RUNNING:-false}" == "false" ]]; then + export TEST_DATA_DIR="$(mktemp -d "${TMPDIR:-/tmp}"/robot-data-XXXXXX)" + chmod go+rx "${TEST_DATA_DIR}" + _compose_delete_test_data() { + rm -frv "${TEST_DATA_DIR}" + } + + trap _compose_cleanup EXIT HUP INT TERM +fi + +_compose_cleanup() { + if [[ "${OZONE_COMPOSE_RUNNING}" == "true" ]]; then + stop_docker_env || true + fi + if [[ "$(type -t _compose_delete_test_data || true)" == "function" ]]; then + _compose_delete_test_data + fi +} + ## @description create results directory, purging any prior data create_results_dir() { #delete previous results @@ -138,15 +159,15 @@ start_docker_env(){ create_results_dir export OZONE_SAFEMODE_MIN_DATANODES="${datanode_count}" - docker-compose --ansi never down - - trap stop_docker_env EXIT HUP INT TERM + docker-compose --ansi never down --remove-orphans opts="" if has_scalable_datanode; then opts="--scale datanode=${datanode_count}" fi + OZONE_COMPOSE_RUNNING=true + trap _compose_cleanup EXIT HUP INT TERM docker-compose --ansi never up -d $opts wait_for_safemode_exit @@ -184,11 +205,11 @@ execute_robot_test(){ local output_name=$(get_output_name) # find unique filename - declare -i i=0 - OUTPUT_FILE="robot-${output_name}1.xml" - while [[ -f $RESULT_DIR/$OUTPUT_FILE ]]; do - let ++i - OUTPUT_FILE="robot-${output_name}${i}.xml" + for ((i=1; i<1000; i++)); do + OUTPUT_FILE="robot-${output_name}$(printf "%03d" ${i}).xml" + if [[ ! 
-f $RESULT_DIR/$OUTPUT_FILE ]]; then + break; + fi done SMOKETEST_DIR_INSIDE="${OZONE_DIR:-/opt/hadoop}/smoketest" @@ -200,7 +221,7 @@ execute_robot_test(){ # shellcheck disable=SC2068 docker-compose exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" \ && docker-compose exec -T "$CONTAINER" robot \ - -v KEY_NAME:"${OZONE_BUCKET_KEY_NAME}" \ + -v ENCRYPTION_KEY:"${OZONE_BUCKET_KEY_NAME}" \ -v OM_HA_PARAM:"${OM_HA_PARAM}" \ -v OM_SERVICE_ID:"${OM_SERVICE_ID:-om}" \ -v OZONE_DIR:"${OZONE_DIR}" \ @@ -367,7 +388,8 @@ stop_docker_env(){ down_repeats=3 for i in $(seq 1 $down_repeats) do - if docker-compose --ansi never down; then + if docker-compose --ansi never --profile "*" down --remove-orphans; then + OZONE_COMPOSE_RUNNING=false return fi if [[ ${i} -eq 1 ]]; then @@ -398,7 +420,7 @@ run_rebot() { shift 2 - local tempdir="$(mktemp -d --suffix rebot -p "${output_dir}")" + local tempdir="$(mktemp -d "${output_dir}"/rebot-XXXXXX)" #Should be writeable from the docker containers where user is different. chmod a+wx "${tempdir}" if docker run --rm -v "${input_dir}":/rebot-input -v "${tempdir}":/rebot-output -w /rebot-input \ @@ -517,9 +539,13 @@ fix_data_dir_permissions() { ## @param `ozone` image version prepare_for_binary_image() { local v=$1 + local default_image="${docker.ozone.image}" # set at build-time from Maven property + local default_flavor="${docker.ozone.image.flavor}" # set at build-time from Maven property + local image="${OZONE_IMAGE:-${default_image}}" # may be specified by user running the test + local flavor="${OZONE_IMAGE_FLAVOR:-${default_flavor}}" # may be specified by user running the test export OZONE_DIR=/opt/ozone - export OZONE_IMAGE="apache/ozone:${v}" + export OZONE_TEST_IMAGE="${image}:${v}${flavor}" } ## @description Define variables required for using `ozone-runner` docker image @@ -539,7 +565,7 @@ get_runner_image_spec() { ## @param `ozone-runner` image version (optional) prepare_for_runner_image() { export OZONE_DIR=/opt/hadoop - export OZONE_IMAGE="$(get_runner_image_spec "$@")" + export OZONE_TEST_IMAGE="$(get_runner_image_spec "$@")" } ## @description Executing the Ozone Debug CLI related robot tests diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env index 4e01ec92416..2625c4fbe90 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env @@ -14,11 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +HADOOP_IMAGE=apache/hadoop HADOOP_VERSION=${hadoop.version} HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner -OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} +OZONE_TEST_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image} OZONE_DIR=/opt/hadoop OZONE_VOLUME=./data diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml index 84ae48fbbc3..8235f213749 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml @@ -20,7 +20,7 @@ x-common-config: env_file: - docker-config - ../../../common/security.conf - image: ${OZONE_IMAGE} + image: ${OZONE_TEST_IMAGE} dns_search: . 
x-environment: @@ -67,7 +67,7 @@ x-volumes: services: kdc: - command: ["krb5kdc","-n"] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] hostname: kdc image: ${OZONE_TESTKRB5_IMAGE} networks: @@ -83,7 +83,7 @@ services: - docker-config environment: HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop - image: apache/hadoop:${HADOOP_VERSION} + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} networks: net: ipv4_address: 10.9.0.3 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config index 909b72852aa..d06d3279dc9 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config @@ -35,11 +35,12 @@ OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_hdds.datanode.dir=/data/hdds OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http +OZONE-SITE.XML_ozone.fs.hsync.enabled=true # If SCM sends container close commands as part of upgrade finalization while # datanodes are doing a leader election, all 3 replicas may end up in the diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env index 616f960b3e4..babe87a492a 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env @@ -17,6 +17,6 @@ HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner -OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} +OZONE_TEST_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} OZONE_DIR=/opt/hadoop OZONE_VOLUME=./data diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml index 28b3d922f71..7aea9af378e 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml @@ -19,7 +19,7 @@ x-common-config: &common-config env_file: - docker-config - image: ${OZONE_IMAGE} + image: ${OZONE_TEST_IMAGE} x-environment: &environment diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config index 95ce6c0c9c5..ce4a8807e54 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config @@ -25,7 +25,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.client.address=scm @@ -37,6 +37,7 @@ OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon 
OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http +OZONE-SITE.XML_ozone.fs.hsync.enabled=true OZONE_CONF_DIR=/etc/hadoop OZONE_LOG_DIR=/var/log/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env index 4d1c35c3b2d..85c422b5ad7 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env @@ -17,7 +17,7 @@ HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner -OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} +OZONE_TEST_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} OZONE_DIR=/opt/hadoop OZONE_VOLUME=./data OM_SERVICE_ID=omservice diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml index b7bf1fc4983..880b36ff2b3 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml @@ -19,7 +19,7 @@ x-common-config: &common-config env_file: - docker-config - image: ${OZONE_IMAGE} + image: ${OZONE_TEST_IMAGE} x-environment: &environment diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config index 1b805c98960..a049ba5f012 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config @@ -27,11 +27,12 @@ OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.client.address=scm OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http +OZONE-SITE.XML_ozone.fs.hsync.enabled=true OZONE-SITE.XML_hdds.datanode.dir=/data/hdds OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh index 6fc4763631b..18930538029 100755 --- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -#suite:compat +#suite:upgrade TEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) @@ -36,7 +36,8 @@ RESULT_DIR="$ALL_RESULT_DIR" create_results_dir # This is the version of Ozone that should use the runner image to run the # code that was built. Other versions will pull images from docker hub. 
export OZONE_CURRENT_VERSION="${ozone.version}" -run_test ha non-rolling-upgrade 1.4.0 "$OZONE_CURRENT_VERSION" +run_test ha non-rolling-upgrade 1.4.1 "$OZONE_CURRENT_VERSION" +# run_test ha non-rolling-upgrade 1.4.0 "$OZONE_CURRENT_VERSION" # run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION" # run_test ha non-rolling-upgrade 1.2.1 "$OZONE_CURRENT_VERSION" # run_test om-ha non-rolling-upgrade 1.1.0 "$OZONE_CURRENT_VERSION" diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh index 69af73f50c9..bad0a704d39 100755 --- a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh @@ -51,12 +51,12 @@ create_data_dirs() { # be used. ## Else, a binary image will be used. prepare_for_image() { - local image_version="$1" + local image_version="${1}" if [[ "$image_version" = "$OZONE_CURRENT_VERSION" ]]; then prepare_for_runner_image else - prepare_for_binary_image "$image_version" + prepare_for_binary_image "${image_version}" fi } diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh new file mode 100644 index 00000000000..ec64d5dcd54 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/non-rolling-upgrade/callbacks/1.5.0/callback.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source "$TEST_DIR"/testlib.sh + +with_this_version_pre_finalized() { + # New layout features were added in this version, so OM and SCM should be pre-finalized. + execute_robot_test "$SCM" -N "${OUTPUT_NAME}-check-finalization" --include pre-finalized upgrade/check-finalization.robot + # Test that HSync is disabled when pre-finalized. 
+ execute_robot_test "$SCM" -N "${OUTPUT_NAME}-hsync" --include pre-finalized-hsync-tests hsync/upgrade-hsync-check.robot +} + +with_this_version_finalized() { + execute_robot_test "$SCM" -N "${OUTPUT_NAME}-check-finalization" --include finalized upgrade/check-finalization.robot + execute_robot_test "$SCM" -N "${OUTPUT_NAME}-hsync" debug/ozone-debug-lease-recovery.robot + execute_robot_test "$SCM" -N "${OUTPUT_NAME}-freon-hsync" freon/hsync.robot +} diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/.env b/hadoop-ozone/dist/src/main/compose/xcompat/.env index 140975d4bd0..11979d34326 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/.env +++ b/hadoop-ozone/dist/src/main/compose/xcompat/.env @@ -17,3 +17,8 @@ HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner +HADOOP_IMAGE=apache/hadoop +HADOOP_VERSION=${hadoop.version} +OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image} +OZONE_IMAGE=${docker.ozone.image} +OZONE_IMAGE_FLAVOR="${docker.ozone.image.flavor}" diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml index 0bf0f619bd7..567845e0889 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml @@ -14,48 +14,58 @@ # See the License for the specific language governing permissions and # limitations under the License. +x-common-config: + &common-config + dns_search: . + env_file: + - docker-config + command: ["sleep","1000000"] + +x-old-config: + &old-config + <<: *common-config + volumes: + - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf + - ${TEST_DATA_DIR}:/testdata + +x-new-config: + &new-config + image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} + <<: *common-config + volumes: + - ../..:/opt/hadoop + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf + - ${TEST_DATA_DIR}:/testdata + services: old_client_1_0_0: - image: apache/ozone:1.0.0 - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.0.0${OZONE_IMAGE_FLAVOR} + <<: *old-config + old_client_1_1_0: - image: apache/ozone:1.1.0 - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.1.0${OZONE_IMAGE_FLAVOR} + <<: *old-config + old_client_1_2_1: - image: apache/ozone:1.2.1 - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.2.1${OZONE_IMAGE_FLAVOR} + <<: *old-config + old_client_1_3_0: - image: apache/ozone:1.3.0 - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.3.0${OZONE_IMAGE_FLAVOR} + <<: *old-config + old_client_1_4_0: - image: apache/ozone:1.4.0 - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.4.0${OZONE_IMAGE_FLAVOR} + <<: *old-config + + old_client_1_4_1: + image: ${OZONE_IMAGE}:1.4.1${OZONE_IMAGE_FLAVOR} + <<: *old-config + new_client: - image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} - env_file: - - docker-config - volumes: - - ../..:/opt/hadoop + <<: *new-config environment: OZONE_OPTS: - command: ["sleep","1000000"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config index 85099f902d3..746b2b6e943 100644 --- 
a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config +++ b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +CORE-SITE.XML_fs.defaultFS=ofs://om +CORE-SITE.XML_fs.trash.interval=1 CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem OZONE-SITE.XML_hdds.datanode.dir=/data/hdds @@ -22,6 +24,7 @@ OZONE-SITE.XML_hdds.scm.safemode.min.datanode=3 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.http-address=scm:9876 OZONE-SITE.XML_ozone.recon.address=recon:9891 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon OZONE-SITE.XML_ozone.server.default.replication=3 @@ -29,11 +32,100 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 +OZONE-SITE.XML_ozone.datanode.pipeline.limit=1 OZONE-SITE.XML_recon.om.snapshot.task.interval.delay=1m OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.default.bucket.layout=LEGACY OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http -no_proxy=om,recon,scm,s3g,kdc,localhost,127.0.0.1 + +OZONE-SITE.XML_hdds.block.token.enabled=true +OZONE-SITE.XML_hdds.container.token.enabled=true +OZONE-SITE.XML_hdds.grpc.tls.enabled=true + +OZONE-SITE.XML_ozone.security.enabled=true +OZONE-SITE.XML_ozone.acl.enabled=true +OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer +OZONE-SITE.XML_ozone.administrators="testuser,recon,om" +OZONE-SITE.XML_ozone.s3.administrators="testuser,recon,om" +OZONE-SITE.XML_ozone.recon.administrators="testuser2" +OZONE-SITE.XML_ozone.s3.administrators="testuser,s3g" + +HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019 +HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012 +CORE-SITE.XML_dfs.data.transfer.protection=authentication +CORE-SITE.XML_hadoop.security.authentication=kerberos +CORE-SITE.XML_hadoop.security.auth_to_local="DEFAULT" +CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms + +OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM +OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab +OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM +OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab +OZONE-SITE.XML_ozone.recon.kerberos.keytab.file=/etc/security/keytabs/recon.keytab +OZONE-SITE.XML_ozone.recon.kerberos.principal=recon/recon@EXAMPLE.COM + +OZONE-SITE.XML_ozone.s3g.kerberos.keytab.file=/etc/security/keytabs/s3g.keytab +OZONE-SITE.XML_ozone.s3g.kerberos.principal=s3g/s3g@EXAMPLE.COM + +OZONE-SITE.XML_ozone.httpfs.kerberos.keytab.file=/etc/security/keytabs/httpfs.keytab +OZONE-SITE.XML_ozone.httpfs.kerberos.principal=httpfs/httpfs@EXAMPLE.COM + +HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/dn@EXAMPLE.COM +HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab +HDFS-SITE.XML_dfs.datanode.kerberos.keytab.file=/etc/security/keytabs/dn.keytab 
+HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM +HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab + +OZONE-SITE.XML_ozone.security.http.kerberos.enabled=true +OZONE-SITE.XML_ozone.s3g.secret.http.enabled=true +OZONE-SITE.XML_ozone.http.filter.initializers=org.apache.hadoop.security.AuthenticationFilterInitializer + +OZONE-SITE.XML_ozone.om.http.auth.type=kerberos +OZONE-SITE.XML_hdds.scm.http.auth.type=kerberos +OZONE-SITE.XML_hdds.datanode.http.auth.type=kerberos +OZONE-SITE.XML_ozone.s3g.http.auth.type=kerberos +OZONE-SITE.XML_ozone.s3g.secret.http.auth.type=kerberos +OZONE-SITE.XML_ozone.httpfs.http.auth.type=kerberos +OZONE-SITE.XML_ozone.recon.http.auth.type=kerberos + +OZONE-SITE.XML_hdds.scm.http.auth.kerberos.principal=HTTP/scm@EXAMPLE.COM +OZONE-SITE.XML_hdds.scm.http.auth.kerberos.keytab=/etc/security/keytabs/scm.keytab +OZONE-SITE.XML_ozone.om.http.auth.kerberos.principal=HTTP/om@EXAMPLE.COM +OZONE-SITE.XML_ozone.om.http.auth.kerberos.keytab=/etc/security/keytabs/om.keytab +OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.principal=HTTP/dn@EXAMPLE.COM +OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.keytab=/etc/security/keytabs/dn.keytab +OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.keytab=/etc/security/keytabs/s3g.keytab +OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.principal=HTTP/s3g@EXAMPLE.COM +OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.keytab=/etc/security/keytabs/httpfs.keytab +OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.principal=HTTP/httpfs@EXAMPLE.COM +OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=* +OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/recon.keytab + +CORE-SITE.XML_hadoop.http.authentication.simple.anonymous.allowed=false +CORE-SITE.XML_hadoop.http.authentication.signature.secret.file=/etc/security/http_secret +CORE-SITE.XML_hadoop.http.authentication.type=kerberos +CORE-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM +CORE-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab + +CORE-SITE.XML_hadoop.security.authorization=true +HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=* +HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=* +HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=* +HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=* +HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=* +HADOOP-POLICY.XML_ozone.security.reconfigure.protocol.acl=* + +KMS-SITE.XML_hadoop.kms.proxyuser.s3g.users=* +KMS-SITE.XML_hadoop.kms.proxyuser.s3g.groups=* +KMS-SITE.XML_hadoop.kms.proxyuser.s3g.hosts=* + +OZONE_DATANODE_SECURE_USER=root +JSVC_HOME=/usr/bin + +OZONE_LOG_DIR=/var/log/hadoop + +no_proxy=om,scm,recon,s3g,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf b/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf new file mode 100644 index 00000000000..eefc5b9c685 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[logging] +default = FILE:/var/log/krb5libs.log +kdc = FILE:/var/log/krb5kdc.log +admin_server = FILE:/var/log/kadmind.log + +[libdefaults] + dns_canonicalize_hostname = false + dns_lookup_realm = false + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + default_realm = EXAMPLE.COM + +[realms] + EXAMPLE.COM = { + kdc = kdc + admin_server = kdc + max_renewable_life = 7d + } + +[domain_realm] + .example.com = EXAMPLE.COM + example.com = EXAMPLE.COM + diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/lib.sh b/hadoop-ozone/dist/src/main/compose/xcompat/lib.sh new file mode 100755 index 00000000000..db7a6273464 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/xcompat/lib.sh @@ -0,0 +1,114 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
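With the EXAMPLE.COM realm and kdc container defined above, a quick manual sanity check of the Kerberos setup from inside any client container of the secure xcompat cluster looks like the following (the same principal and keytab path that the test library's `_kinit` helper uses):

```bash
# Sketch: verify Kerberos works in the secure xcompat environment; the keytab
# path, principal and realm are the ones defined in this patch.
kinit -k -t /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM
klist   # should show a TGT for testuser/scm@EXAMPLE.COM issued by the kdc container
```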
+ +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR +basename=$(basename ${COMPOSE_DIR}) + +# version is used in bucket name, which does not allow uppercase +current_version="$(echo "${ozone.version}" | sed -e 's/-SNAPSHOT//' | tr '[:upper:]' '[:lower:]')" +# TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters +old_versions="1.1.0 1.2.1 1.3.0 1.4.0 1.4.1" # container is needed for each version in clients.yaml + +# shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh +source "${COMPOSE_DIR}/../testlib.sh" + +export SECURITY_ENABLED=true +: ${OZONE_BUCKET_KEY_NAME:=key1} + +echo 'Compatibility Test' > "${TEST_DATA_DIR}"/small + +client() { + if [[ "${client_version}" == "${current_version}" ]]; then + OZONE_DIR=/opt/hadoop + container=new_client + else + OZONE_DIR=/opt/ozone + container="old_client_${client_version//./_}" + fi + + "$@" +} + +_kinit() { + execute_command_in_container ${container} kinit -k -t /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM +} + +_init() { + container=scm + _kinit + execute_command_in_container ${container} ozone freon ockg -n1 -t1 -p warmup +} + +_write() { + _kinit + execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-write" \ + -v CLIENT_VERSION:${client_version} \ + -v CLUSTER_VERSION:${cluster_version} \ + -v TEST_DATA_DIR:/testdata \ + compatibility/write.robot +} + +_read() { + _kinit + local data_version="$1" + execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${data_version}" \ + -v CLIENT_VERSION:${client_version} \ + -v CLUSTER_VERSION:${cluster_version} \ + -v DATA_VERSION:${data_version} \ + -v TEST_DATA_DIR:/testdata \ + compatibility/read.robot +} + +test_cross_compatibility() { + echo "Starting ${cluster_version} cluster with COMPOSE_FILE=${COMPOSE_FILE}" + + OZONE_KEEP_RESULTS=true start_docker_env 5 + + execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} + + _init + + # first write with client matching cluster version + client_version="${cluster_version}" client _write + + for client_version in "$@"; do + # skip write, since already done + if [[ "${client_version}" == "${cluster_version}" ]]; then + continue + fi + client _write + done + + for client_version in "$@"; do + for data_version in $(echo "$client_version" "$cluster_version" "$current_version" | xargs -n1 | sort -u); do + + # do not test old-only scenario + if [[ "${cluster_version}" != "${current_version}" ]] \ + && [[ "${client_version}" != "${current_version}" ]] \ + && [[ "${data_version}" != "${current_version}" ]]; then + continue + fi + + client _read ${data_version} + done + done + + KEEP_RUNNING=false stop_docker_env +} + +create_results_dir diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml index 6e3ff6cfbc9..275338d7e70 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml @@ -18,14 +18,39 @@ x-new-config: &new-config image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} + dns_search: . env_file: - docker-config volumes: - ../..:/opt/hadoop + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf services: + kdc: + image: ${OZONE_TESTKRB5_IMAGE} + hostname: kdc + dns_search: . 
+ volumes: + - ../..:/opt/hadoop + - ../_keytabs:/etc/security/keytabs + command: ["/opt/hadoop/compose/common/init-kdc.sh"] + kms: + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + hostname: kms + dns_search: . + ports: + - 9600:9600 + env_file: + - ./docker-config + environment: + HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop + volumes: + - ../../libexec/transformation.py:/opt/transformation.py + command: [ "hadoop", "kms" ] datanode: <<: *new-config + hostname: dn ports: - 19864 - 9882 @@ -34,15 +59,17 @@ services: command: ["ozone","datanode"] om: <<: *new-config + hostname: om environment: ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - OZONE_OPTS: + OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false ports: - 9874:9874 - 9862:9862 command: ["ozone","om"] recon: <<: *new-config + hostname: recon ports: - 9888:9888 environment: @@ -50,6 +77,7 @@ services: command: ["ozone","recon"] s3g: <<: *new-config + hostname: s3g environment: OZONE_OPTS: ports: @@ -57,9 +85,12 @@ services: command: ["ozone","s3g"] scm: <<: *new-config + hostname: scm ports: - 9876:9876 + - 9860:9860 environment: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}" OZONE_OPTS: command: ["ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml index c603bb51df3..e3df1b3dda0 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml @@ -17,15 +17,40 @@ # reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) x-old-config: &old-config - image: apache/ozone:${OZONE_VERSION} + image: ${OZONE_IMAGE}:${OZONE_VERSION}${OZONE_IMAGE_FLAVOR} + dns_search: . env_file: - docker-config volumes: - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf services: + kdc: + image: ${OZONE_TESTKRB5_IMAGE} + hostname: kdc + dns_search: . + volumes: + - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + command: ["/opt/ozone/compose/common/init-kdc.sh"] + kms: + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + hostname: kms + dns_search: . 
+ ports: + - 9600:9600 + env_file: + - ./docker-config + environment: + HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop + volumes: + - ../../libexec/transformation.py:/opt/transformation.py + command: [ "hadoop", "kms" ] datanode: <<: *old-config + hostname: dn ports: - 19864 - 9882 @@ -34,8 +59,10 @@ services: command: ["ozone","datanode"] om: <<: *old-config + hostname: om environment: ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false HADOOP_OPTS: ports: - 9874:9874 @@ -43,6 +70,7 @@ services: command: ["ozone","om"] recon: <<: *old-config + hostname: recon ports: - 9888:9888 environment: @@ -50,6 +78,7 @@ services: command: ["ozone","recon"] s3g: <<: *old-config + hostname: s3g environment: HADOOP_OPTS: ports: @@ -57,9 +86,11 @@ services: command: ["ozone","s3g"] scm: <<: *old-config + hostname: scm ports: - 9876:9876 environment: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}" HADOOP_OPTS: command: ["ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test-new.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test-new.sh new file mode 100755 index 00000000000..3acea6b7932 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/xcompat/test-new.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:compat-new + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +# shellcheck source=hadoop-ozone/dist/src/main/compose/xcompat/lib.sh +source "${COMPOSE_DIR}/lib.sh" + +# current cluster with various clients +COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${current_version} test_cross_compatibility ${old_versions} ${current_version} diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test-old.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test-old.sh new file mode 100755 index 00000000000..d54b555262f --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/xcompat/test-old.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:compat-old + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +# shellcheck source=hadoop-ozone/dist/src/main/compose/xcompat/lib.sh +source "${COMPOSE_DIR}/lib.sh" + +# old cluster with clients: same version and current version +for cluster_version in ${old_versions}; do + export OZONE_VERSION=${cluster_version} + COMPOSE_FILE=old-cluster.yaml:clients.yaml test_cross_compatibility ${cluster_version} ${current_version} +done diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh deleted file mode 100755 index 695d8bf06ab..00000000000 --- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#suite:compat - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR -basename=$(basename ${COMPOSE_DIR}) - -current_version="${ozone.version}" -old_versions="1.0.0 1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml - -# shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh -source "${COMPOSE_DIR}/../testlib.sh" - -old_client() { - OZONE_DIR=/opt/ozone - container=${client} - "$@" -} - -new_client() { - OZONE_DIR=/opt/hadoop - container=new_client - client_version=${current_version} - "$@" -} - -_init() { - execute_command_in_container ${container} ozone freon ockg -n1 -t1 -p warmup -} - -_write() { - execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-write" -v SUFFIX:${client_version} compatibility/write.robot -} - -_read() { - local data_version="$1" - execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${data_version}" -v SUFFIX:${data_version} compatibility/read.robot -} - -test_cross_compatibility() { - echo "Starting cluster with COMPOSE_FILE=${COMPOSE_FILE}" - - OZONE_KEEP_RESULTS=true start_docker_env - - execute_command_in_container scm ozone freon ockg -n1 -t1 -p warmup - new_client _write - new_client _read ${current_version} - - for client_version in "$@"; do - client="old_client_${client_version//./_}" - - old_client _write - old_client _read ${client_version} - - old_client _read ${current_version} - new_client _read ${client_version} - done - - KEEP_RUNNING=false stop_docker_env -} - -test_ec_cross_compatibility() { - echo "Running Erasure Coded storage backward compatibility tests." 
- # local cluster_versions_with_ec="1.3.0 1.4.0 ${current_version}" - local cluster_versions_with_ec="${current_version}" # until HDDS-11334 - local non_ec_client_versions="1.0.0 1.1.0 1.2.1" - - for cluster_version in ${cluster_versions_with_ec}; do - export COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${cluster_version} - OZONE_KEEP_RESULTS=true start_docker_env 5 - - echo -n "Generating data locally... " - dd if=/dev/urandom of=/tmp/1mb bs=1048576 count=1 >/dev/null 2>&1 - dd if=/dev/urandom of=/tmp/2mb bs=1048576 count=2 >/dev/null 2>&1 - dd if=/dev/urandom of=/tmp/3mb bs=1048576 count=3 >/dev/null 2>&1 - echo "done" - echo -n "Copy data into client containers... " - for container in $(docker ps --format '{{.Names}}' | grep client); do - docker cp /tmp/1mb ${container}:/tmp/1mb - docker cp /tmp/2mb ${container}:/tmp/2mb - docker cp /tmp/3mb ${container}:/tmp/3mb - done - echo "done" - rm -f /tmp/1mb /tmp/2mb /tmp/3mb - - - local prefix=$(LC_CTYPE=C tr -dc '[:alnum:]' < /dev/urandom | head -c 5 | tr '[:upper:]' '[:lower:]') - OZONE_DIR=/opt/hadoop - execute_robot_test new_client --include setup-ec-data -N "xcompat-cluster-${cluster_version}-setup-data" -v prefix:"${prefix}" ec/backward-compat.robot - OZONE_DIR=/opt/ozone - - for client_version in ${non_ec_client_versions}; do - client="old_client_${client_version//./_}" - unset OUTPUT_PATH - execute_robot_test "${client}" --include test-ec-compat -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${cluster_version}" -v prefix:"${prefix}" ec/backward-compat.robot - done - - KEEP_RUNNING=false stop_docker_env - done -} - -create_results_dir - -# current cluster with various clients -COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${current_version} test_cross_compatibility ${old_versions} - -# old cluster with clients: same version and current version -for cluster_version in ${old_versions}; do - export OZONE_VERSION=${cluster_version} - COMPOSE_FILE=old-cluster.yaml:clients.yaml test_cross_compatibility ${cluster_version} -done - -test_ec_cross_compatibility diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml index 88a36835c29..8239325dd97 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml @@ -13,22 +13,28 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
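Before the Kubernetes changes: with the monolithic xcompat `test.sh` now split into `test-new.sh` (`#suite:compat-new`) and `test-old.sh` (`#suite:compat-old`), the two suites can also be run by hand. A hedged sketch, assuming a built distribution (lib.sh depends on Maven-filtered values such as `${ozone.version}`, so this does not work from the raw source tree):

```bash
# Sketch: running the split compatibility suites from a built distribution.
cd hadoop-ozone/dist/target/ozone-*/compose/xcompat
./test-new.sh   # current cluster against old and current clients   (#suite:compat-new)
./test-old.sh   # each old cluster version with its own and the current client   (#suite:compat-old)
```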
+ apiVersion: v1 kind: ConfigMap metadata: name: config data: - + CORE-SITE.XML_fs.defaultFS: ofs://om/ + CORE-SITE.XML_fs.trash.interval: "1" + HTTPFS-SITE.XML_httpfs.hadoop.config.dir: /opt/hadoop/etc/config + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.groups: "*" + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.hosts: "*" + OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" OZONE-SITE.XML_hdds.datanode.dir: "/data/storage" - OZONE-SITE.XML_ozone.scm.datanode.id.dir: "/data" + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" + OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" OZONE-SITE.XML_ozone.metadata.dirs: "/data/metadata" - OZONE-SITE.XML_ozone.scm.block.client.address: "scm-0.scm" OZONE-SITE.XML_ozone.om.address: "om-0.om" + OZONE-SITE.XML_ozone.recon.address: "recon-0.recon" + OZONE-SITE.XML_ozone.scm.block.client.address: "scm-0.scm" OZONE-SITE.XML_ozone.scm.client.address: "scm-0.scm" + OZONE-SITE.XML_ozone.scm.datanode.id.dir: "/data/metadata" OZONE-SITE.XML_ozone.scm.names: "scm-0.scm" - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" - OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" - OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" LOG4J.PROPERTIES_log4j.rootLogger: "INFO, stdout" LOG4J.PROPERTIES_log4j.appender.stdout: "org.apache.log4j.ConsoleAppender" LOG4J.PROPERTIES_log4j.appender.stdout.layout: "org.apache.log4j.PatternLayout" diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/httpfs-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/httpfs-ss-service.yaml new file mode 100644 index 00000000000..0b8f1175246 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/httpfs-ss-service.yaml @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: Service +metadata: + name: httpfs +spec: + ports: + - port: 14000 + name: rest + clusterIP: None + selector: + app: ozone + component: httpfs diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/httpfs-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/httpfs-ss.yaml new file mode 100644 index 00000000000..1084ca0373c --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/httpfs-ss.yaml @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: httpfs + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: httpfs + serviceName: httpfs + replicas: 1 + template: + metadata: + labels: + app: ozone + component: httpfs + spec: + containers: + - name: httpfs + image: "@docker.image@" + args: ["ozone","httpfs"] + livenessProbe: + httpGet: + path: /webhdfs/v1/?op=LISTSTATUS&user.name=hadoop + port: 14000 + initialDelaySeconds: 30 diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/recon-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/recon-ss-service.yaml new file mode 100644 index 00000000000..a7994a6b91a --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/recon-ss-service.yaml @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: Service +metadata: + name: recon +spec: + ports: + - port: 9888 + name: ui + clusterIP: None + selector: + app: ozone + component: recon diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/recon-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/recon-ss.yaml new file mode 100644 index 00000000000..f06dc54c664 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/recon-ss.yaml @@ -0,0 +1,52 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: recon + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: recon + serviceName: recon + replicas: 1 + template: + metadata: + labels: + app: ozone + component: recon + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9888" + prometheus.io/path: "/prom" + spec: + securityContext: + fsGroup: 1000 + containers: + - name: recon + image: "@docker.image@" + args: ["ozone","recon"] + env: + - name: WAITFOR + value: scm-0.scm:9876 + livenessProbe: + tcpSocket: + port: 9891 + initialDelaySeconds: 30 + volumes: [] diff --git a/hadoop-ozone/dist/src/main/k8s/examples/README.md b/hadoop-ozone/dist/src/main/k8s/examples/README.md new file mode 100644 index 00000000000..574efc6704a --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/README.md @@ -0,0 +1,22 @@ + + +# Ozone examples for Kubernetes + +This directory contains example resources for running Ozone in Kubernetes. + +Note that the files are generated based on the [definitions](../definitions) using [Flekszible](https://github.com/elek/flekszible). If you would like to modify them permanently, edit the definition and regenerate the examples by running `regenerate-all.sh` after installing Flekszible. As a developer, you can run `hadoop-ozone/dev-support/k8s/regenerate-examples.sh`, which will also install Flekszible. diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml index b3acc6f1d22..4e503b7bd05 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml @@ -19,16 +19,22 @@ kind: ConfigMap metadata: name: config data: + CORE-SITE.XML_fs.defaultFS: ofs://om/ + CORE-SITE.XML_fs.trash.interval: "1" + HTTPFS-SITE.XML_httpfs.hadoop.config.dir: /opt/hadoop/etc/config + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.groups: '*' + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.hosts: '*' + OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" + OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.om.address: om-0.om + OZONE-SITE.XML_ozone.recon.address: recon-0.recon + OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.scm.names: scm-0.scm - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" - OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" - OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-public-service.yaml new file mode 100644 index 00000000000..d8586250553 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-public-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation 
(ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: httpfs-public +spec: + ports: + - port: 14000 + name: rest + selector: + app: ozone + component: httpfs + type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-service.yaml new file mode 100644 index 00000000000..0ab49c2d72e --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: httpfs +spec: + ports: + - port: 14000 + name: rest + clusterIP: None + selector: + app: ozone + component: httpfs diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-statefulset.yaml new file mode 100644 index 00000000000..7bca21585c1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-statefulset.yaml @@ -0,0 +1,55 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: httpfs + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: httpfs + serviceName: httpfs + replicas: 1 + template: + metadata: + labels: + app: ozone + component: httpfs + spec: + containers: + - name: httpfs + image: '@docker.image@' + args: + - ozone + - httpfs + livenessProbe: + httpGet: + path: /webhdfs/v1/?op=LISTSTATUS&user.name=hadoop + port: 14000 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml index 3059b9c801e..427c654d543 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml @@ -18,13 +18,19 @@ resources: - config-configmap.yaml - datanode-service.yaml - datanode-statefulset.yaml +- httpfs-service.yaml +- httpfs-statefulset.yaml - om-service.yaml - om-statefulset.yaml +- recon-service.yaml +- recon-statefulset.yaml - s3g-service.yaml - s3g-statefulset.yaml - scm-service.yaml - scm-statefulset.yaml - datanode-public-service.yaml +- httpfs-public-service.yaml - om-public-service.yaml +- recon-public-service.yaml - s3g-public-service.yaml - scm-public-service.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-public-service.yaml new file mode 100644 index 00000000000..c737a02f446 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-public-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon-public +spec: + ports: + - port: 9888 + name: ui + selector: + app: ozone + component: recon + type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-service.yaml new file mode 100644 index 00000000000..9c52d393d55 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon +spec: + ports: + - port: 9888 + name: ui + clusterIP: None + selector: + app: ozone + component: recon diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-statefulset.yaml new file mode 100644 index 00000000000..8b9ee191d03 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-statefulset.yaml @@ -0,0 +1,63 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: recon + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: recon + serviceName: recon + replicas: 1 + template: + metadata: + labels: + app: ozone + component: recon + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9888" + prometheus.io/path: /prom + spec: + securityContext: + fsGroup: 1000 + containers: + - name: recon + image: '@docker.image@' + args: + - ozone + - recon + env: + - name: WAITFOR + value: scm-0.scm:9876 + livenessProbe: + tcpSocket: + port: 9891 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml index b3acc6f1d22..4e503b7bd05 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml @@ -19,16 +19,22 @@ kind: ConfigMap metadata: name: config data: + CORE-SITE.XML_fs.defaultFS: ofs://om/ + CORE-SITE.XML_fs.trash.interval: "1" + HTTPFS-SITE.XML_httpfs.hadoop.config.dir: /opt/hadoop/etc/config + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.groups: '*' + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.hosts: '*' + OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" + OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.om.address: om-0.om + 
OZONE-SITE.XML_ozone.recon.address: recon-0.recon + OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.scm.names: scm-0.scm - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" - OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" - OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-public-service.yaml new file mode 100644 index 00000000000..d8586250553 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-public-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: httpfs-public +spec: + ports: + - port: 14000 + name: rest + selector: + app: ozone + component: httpfs + type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-service.yaml new file mode 100644 index 00000000000..0ab49c2d72e --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
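Since the example kustomizations now wire in the httpfs and recon workloads, a quick smoke test after applying one of them is possible. A sketch, assuming the manifests come from a built distribution (the `@docker.image@` placeholder is only substituted there); pod names, ports and the probe URL are the ones defined in this patch:

```bash
# Sketch: smoke-testing the newly added Recon and HttpFS workloads.
kubectl apply -k .                               # run inside e.g. the getting-started example directory
kubectl port-forward pod/recon-0 9888:9888 &
curl -s http://localhost:9888/prom | head -n 5   # metrics endpoint advertised via the prometheus.io annotations
kubectl port-forward pod/httpfs-0 14000:14000 &
curl -s 'http://localhost:14000/webhdfs/v1/?op=LISTSTATUS&user.name=hadoop'   # same request as the liveness probe
```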
+ +apiVersion: v1 +kind: Service +metadata: + name: httpfs +spec: + ports: + - port: 14000 + name: rest + clusterIP: None + selector: + app: ozone + component: httpfs diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-statefulset.yaml new file mode 100644 index 00000000000..7bca21585c1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/httpfs-statefulset.yaml @@ -0,0 +1,55 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: httpfs + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: httpfs + serviceName: httpfs + replicas: 1 + template: + metadata: + labels: + app: ozone + component: httpfs + spec: + containers: + - name: httpfs + image: '@docker.image@' + args: + - ozone + - httpfs + livenessProbe: + httpGet: + path: /webhdfs/v1/?op=LISTSTATUS&user.name=hadoop + port: 14000 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/kustomization.yaml index 3059b9c801e..427c654d543 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/kustomization.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/kustomization.yaml @@ -18,13 +18,19 @@ resources: - config-configmap.yaml - datanode-service.yaml - datanode-statefulset.yaml +- httpfs-service.yaml +- httpfs-statefulset.yaml - om-service.yaml - om-statefulset.yaml +- recon-service.yaml +- recon-statefulset.yaml - s3g-service.yaml - s3g-statefulset.yaml - scm-service.yaml - scm-statefulset.yaml - datanode-public-service.yaml +- httpfs-public-service.yaml - om-public-service.yaml +- recon-public-service.yaml - s3g-public-service.yaml - scm-public-service.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-public-service.yaml new file mode 100644 index 00000000000..c737a02f446 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-public-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon-public +spec: + ports: + - port: 9888 + name: ui + selector: + app: ozone + component: recon + type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-service.yaml new file mode 100644 index 00000000000..9c52d393d55 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon +spec: + ports: + - port: 9888 + name: ui + clusterIP: None + selector: + app: ozone + component: recon diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-statefulset.yaml new file mode 100644 index 00000000000..8b9ee191d03 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/recon-statefulset.yaml @@ -0,0 +1,63 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: recon + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: recon + serviceName: recon + replicas: 1 + template: + metadata: + labels: + app: ozone + component: recon + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9888" + prometheus.io/path: /prom + spec: + securityContext: + fsGroup: 1000 + containers: + - name: recon + image: '@docker.image@' + args: + - ozone + - recon + env: + - name: WAITFOR + value: scm-0.scm:9876 + livenessProbe: + tcpSocket: + port: 9891 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml index 122382afdbd..7a966957364 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml @@ -19,16 +19,22 @@ kind: ConfigMap metadata: name: config data: + CORE-SITE.XML_fs.defaultFS: ofs://om/ + CORE-SITE.XML_fs.trash.interval: "1" + HTTPFS-SITE.XML_httpfs.hadoop.config.dir: /opt/hadoop/etc/config + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.groups: '*' + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.hosts: '*' + OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" + OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.om.address: om-0.om + OZONE-SITE.XML_ozone.recon.address: recon-0.recon + OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.scm.names: scm-0.scm - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" - OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" - OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-public-service.yaml new file mode 100644 index 00000000000..d8586250553 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-public-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: httpfs-public +spec: + ports: + - port: 14000 + name: rest + selector: + app: ozone + component: httpfs + type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-service.yaml new file mode 100644 index 00000000000..0ab49c2d72e --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: httpfs +spec: + ports: + - port: 14000 + name: rest + clusterIP: None + selector: + app: ozone + component: httpfs diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-statefulset.yaml new file mode 100644 index 00000000000..59abe8547f6 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/httpfs-statefulset.yaml @@ -0,0 +1,62 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: httpfs + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: httpfs + serviceName: httpfs + replicas: 1 + template: + metadata: + labels: + app: ozone + component: httpfs + spec: + containers: + - name: httpfs + image: '@docker.image@' + args: + - ozone + - httpfs + livenessProbe: + httpGet: + path: /webhdfs/v1/?op=LISTSTATUS&user.name=hadoop + port: 14000 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + env: + - name: JAEGER_SAMPLER_TYPE + value: probabilistic + - name: JAEGER_SAMPLER_PARAM + value: "0.01" + - name: JAEGER_AGENT_HOST + value: jaeger-0.jaeger + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/kustomization.yaml index cf0cbe152d2..f654f6f510f 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/kustomization.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/kustomization.yaml @@ -18,8 +18,12 @@ resources: - config-configmap.yaml - datanode-service.yaml - datanode-statefulset.yaml +- httpfs-service.yaml +- httpfs-statefulset.yaml - om-service.yaml - om-statefulset.yaml +- recon-service.yaml +- recon-statefulset.yaml - s3g-service.yaml - s3g-statefulset.yaml - scm-service.yaml @@ -33,7 +37,9 @@ resources: - jaeger-service.yaml - jaeger-statefulset.yaml - datanode-public-service.yaml +- httpfs-public-service.yaml - om-public-service.yaml +- recon-public-service.yaml - s3g-public-service.yaml - scm-public-service.yaml - jaeger-public-service.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-public-service.yaml new file mode 100644 index 00000000000..c737a02f446 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-public-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon-public +spec: + ports: + - port: 9888 + name: ui + selector: + app: ozone + component: recon + type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-service.yaml new file mode 100644 index 00000000000..9c52d393d55 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon +spec: + ports: + - port: 9888 + name: ui + clusterIP: None + selector: + app: ozone + component: recon diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-statefulset.yaml new file mode 100644 index 00000000000..6466c29595c --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/recon-statefulset.yaml @@ -0,0 +1,69 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
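The recon-public NodePort and headless recon Services above publish the Recon web UI on port 9888. A minimal sketch for reaching it from outside the cluster, assuming a single-node test cluster whose first node address is routable:

    # Sketch: look up the assigned NodePort and query the Recon UI through it.
    NODE_PORT=$(kubectl get svc recon-public -o jsonpath='{.spec.ports[0].nodePort}')
    NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}')
    curl -s "http://${NODE_IP}:${NODE_PORT}/" | head -n 5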
+ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: recon + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: recon + serviceName: recon + replicas: 1 + template: + metadata: + labels: + app: ozone + component: recon + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9888" + prometheus.io/path: /prom + spec: + securityContext: + fsGroup: 1000 + containers: + - name: recon + image: '@docker.image@' + args: + - ozone + - recon + env: + - name: WAITFOR + value: scm-0.scm:9876 + - name: JAEGER_SAMPLER_TYPE + value: probabilistic + - name: JAEGER_SAMPLER_PARAM + value: "0.01" + - name: JAEGER_AGENT_HOST + value: jaeger-0.jaeger + livenessProbe: + tcpSocket: + port: 9891 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml index 61555e1eb56..a9315eb2103 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml @@ -19,13 +19,19 @@ kind: ConfigMap metadata: name: config data: + CORE-SITE.XML_fs.defaultFS: ofs://om/ + CORE-SITE.XML_fs.trash.interval: "1" + HTTPFS-SITE.XML_httpfs.hadoop.config.dir: /opt/hadoop/etc/config + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.groups: '*' + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.hosts: '*' + OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data - OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" - OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" + OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata + OZONE-SITE.XML_ozone.om.address: om-0.om + OZONE-SITE.XML_ozone.recon.address: recon-0.recon + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/httpfs-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/httpfs-service.yaml new file mode 100644 index 00000000000..0ab49c2d72e --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/httpfs-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
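The CORE-SITE.XML_*, HTTPFS-SITE.XML_* and OZONE-SITE.XML_* keys added to the ConfigMap above rely on the runner image's convention of expanding FILE.XML_property entries into the corresponding configuration files, so fs.defaultFS=ofs://om/ and ozone.recon.address=recon-0.recon reach every component that loads the ConfigMap. A quick, hedged way to confirm the default filesystem took effect (the pod name follows the StatefulSets in these examples; everything else is assumed):

    # Sketch: with fs.defaultFS set to ofs://om/, bare paths resolve against the Ozone Manager.
    kubectl exec om-0 -- ozone fs -ls /                      # lists volumes at the ofs root
    kubectl exec om-0 -- ozone fs -mkdir -p /vol1/bucket1    # creates volume and bucket through ofs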
+ +apiVersion: v1 +kind: Service +metadata: + name: httpfs +spec: + ports: + - port: 14000 + name: rest + clusterIP: None + selector: + app: ozone + component: httpfs diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/httpfs-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/httpfs-statefulset.yaml new file mode 100644 index 00000000000..2c076ae8fcd --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/httpfs-statefulset.yaml @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: httpfs + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: httpfs + serviceName: httpfs + replicas: 1 + template: + metadata: + labels: + app: ozone + component: httpfs + spec: + containers: + - name: httpfs + image: '@docker.image@' + args: + - ozone + - httpfs + livenessProbe: + httpGet: + path: /webhdfs/v1/?op=LISTSTATUS&user.name=hadoop + port: 14000 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/kustomization.yaml index 6b3d553113e..49b6b6e2606 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/kustomization.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/kustomization.yaml @@ -18,8 +18,12 @@ resources: - config-configmap.yaml - datanode-service.yaml - datanode-statefulset.yaml +- httpfs-service.yaml +- httpfs-statefulset.yaml - om-service.yaml - om-statefulset.yaml +- recon-service.yaml +- recon-statefulset.yaml - s3g-service.yaml - s3g-statefulset.yaml - scm-service.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/recon-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/recon-service.yaml new file mode 100644 index 00000000000..9c52d393d55 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/recon-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon +spec: + ports: + - port: 9888 + name: ui + clusterIP: None + selector: + app: ozone + component: recon diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/recon-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/recon-statefulset.yaml new file mode 100644 index 00000000000..445c2e222d7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/recon-statefulset.yaml @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: recon + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: recon + serviceName: recon + replicas: 1 + template: + metadata: + labels: + app: ozone + component: recon + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9888" + prometheus.io/path: /prom + spec: + securityContext: + fsGroup: 1000 + containers: + - name: recon + image: '@docker.image@' + args: + - ozone + - recon + env: + - name: WAITFOR + value: scm-0.scm:9876 + livenessProbe: + tcpSocket: + port: 9891 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: [] + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml index 92fe9166d03..0825afc37b6 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml @@ -19,16 +19,22 @@ kind: ConfigMap metadata: name: config data: + CORE-SITE.XML_fs.defaultFS: ofs://om/ + CORE-SITE.XML_fs.trash.interval: "1" + HTTPFS-SITE.XML_httpfs.hadoop.config.dir: /opt/hadoop/etc/config + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.groups: '*' + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.hosts: '*' + OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" + OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - 
OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.om.address: om-0.om + OZONE-SITE.XML_ozone.recon.address: recon-0.recon + OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.scm.names: scm-0.scm - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" - OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" - OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/httpfs-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/httpfs-service.yaml new file mode 100644 index 00000000000..0ab49c2d72e --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/httpfs-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: httpfs +spec: + ports: + - port: 14000 + name: rest + clusterIP: None + selector: + app: ozone + component: httpfs diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/httpfs-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/httpfs-statefulset.yaml new file mode 100644 index 00000000000..2c076ae8fcd --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/httpfs-statefulset.yaml @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
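The httpfs.proxyuser.hadoop.hosts and httpfs.proxyuser.hadoop.groups wildcards added above let the HttpFS service user submit requests on behalf of other users via the standard doas query parameter. A hypothetical proxied call from inside the cluster might look like the following (the service name and port come from the Service above; the path, user names, and simple authentication are assumptions):

    # Sketch: list a bucket as testuser by proxying through the hadoop user.
    curl -s "http://httpfs:14000/webhdfs/v1/vol1/bucket1?op=LISTSTATUS&user.name=hadoop&doas=testuser"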
+ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: httpfs + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: httpfs + serviceName: httpfs + replicas: 1 + template: + metadata: + labels: + app: ozone + component: httpfs + spec: + containers: + - name: httpfs + image: '@docker.image@' + args: + - ozone + - httpfs + livenessProbe: + httpGet: + path: /webhdfs/v1/?op=LISTSTATUS&user.name=hadoop + port: 14000 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/kustomization.yaml index 6b3d553113e..49b6b6e2606 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/kustomization.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/kustomization.yaml @@ -18,8 +18,12 @@ resources: - config-configmap.yaml - datanode-service.yaml - datanode-statefulset.yaml +- httpfs-service.yaml +- httpfs-statefulset.yaml - om-service.yaml - om-statefulset.yaml +- recon-service.yaml +- recon-statefulset.yaml - s3g-service.yaml - s3g-statefulset.yaml - scm-service.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/recon-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/recon-service.yaml new file mode 100644 index 00000000000..9c52d393d55 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/recon-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon +spec: + ports: + - port: 9888 + name: ui + clusterIP: None + selector: + app: ozone + component: recon diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/recon-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/recon-statefulset.yaml new file mode 100644 index 00000000000..445c2e222d7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/recon-statefulset.yaml @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: recon + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: recon + serviceName: recon + replicas: 1 + template: + metadata: + labels: + app: ozone + component: recon + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9888" + prometheus.io/path: /prom + spec: + securityContext: + fsGroup: 1000 + containers: + - name: recon + image: '@docker.image@' + args: + - ozone + - recon + env: + - name: WAITFOR + value: scm-0.scm:9876 + livenessProbe: + tcpSocket: + port: 9891 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: [] + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi diff --git a/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab b/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab deleted file mode 100755 index 889de4410dc..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/dn.keytab b/hadoop-ozone/dist/src/main/keytabs/dn.keytab deleted file mode 100755 index a3f03127c81..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/dn.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab b/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab deleted file mode 100755 index 239f7271404..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab b/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab deleted file mode 100755 index 99b446f2178..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/jhs.keytab b/hadoop-ozone/dist/src/main/keytabs/jhs.keytab deleted file mode 100755 index 458b42240a5..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/jhs.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/nm.keytab b/hadoop-ozone/dist/src/main/keytabs/nm.keytab deleted file mode 100755 index 8b325773acd..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/nm.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/om.keytab b/hadoop-ozone/dist/src/main/keytabs/om.keytab deleted file mode 100755 index fa4a1c59c06..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/om.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/recon.keytab b/hadoop-ozone/dist/src/main/keytabs/recon.keytab deleted file mode 100755 index 4bd3c9e38f4..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/recon.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/rm.keytab b/hadoop-ozone/dist/src/main/keytabs/rm.keytab deleted file mode 100755 index 17feed24128..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/rm.keytab and /dev/null differ diff --git 
a/hadoop-ozone/dist/src/main/keytabs/s3g.keytab b/hadoop-ozone/dist/src/main/keytabs/s3g.keytab deleted file mode 100755 index e7722c546aa..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/s3g.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/scm.keytab b/hadoop-ozone/dist/src/main/keytabs/scm.keytab deleted file mode 100755 index adb7cd58054..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/scm.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/testuser.keytab b/hadoop-ozone/dist/src/main/keytabs/testuser.keytab deleted file mode 100755 index add20797af6..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/testuser.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab b/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab deleted file mode 100755 index 55a9167579a..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh b/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh deleted file mode 100755 index 5094a6bf857..00000000000 --- a/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) -set -ex - -export_keytab() { - kadmin.local -q "ktadd -norandkey -k /etc/security/keytabs/$2.keytab $1@EXAMPLE.COM" -} - -#this section supposed to be executed inside a docker image which already -#has these credentials -# -# the rest of the code executes this part inside a container -# -if [ "$1" == "internal" ]; then - rm /etc/security/keytabs/*.keytab - - export_keytab scm/scm scm - export_keytab HTTP/scm scm - export_keytab testuser/scm scm - export_keytab testuser2/scm scm - - export_keytab testuser/dn testuser - export_keytab testuser/httpfs testuser - export_keytab testuser/om testuser - export_keytab testuser/recon testuser - export_keytab testuser/s3g testuser - export_keytab testuser/scm testuser - - export_keytab testuser2/dn testuser2 - export_keytab testuser2/httpfs testuser2 - export_keytab testuser2/om testuser2 - export_keytab testuser2/recon testuser2 - export_keytab testuser2/s3g testuser2 - export_keytab testuser2/scm testuser2 - - export_keytab om/om om - export_keytab HTTP/om om - export_keytab testuser/om om - export_keytab testuser2/om om - - export_keytab s3g/s3g s3g - export_keytab HTTP/s3g s3g - export_keytab testuser/s3g s3g - export_keytab testuser2/s3g s3g - - export_keytab httpfs/httpfs httpfs - export_keytab HTTP/httpfs httpfs - export_keytab testuser/httpfs httpfs - export_keytab testuser2/httpfs httpfs - - export_keytab recon/recon recon - export_keytab HTTP/recon recon - export_keytab testuser/recon recon - export_keytab testuser2/recon recon - - export_keytab dn/dn dn - export_keytab HTTP/dn dn - export_keytab testuser/dn dn - export_keytab testuser2/dn dn - - export_keytab HTTP/scm HTTP - export_keytab HTTP/s3g HTTP - export_keytab HTTP/httpfs HTTP - export_keytab HTTP/ozone HTTP - - export_keytab hadoop/rm hadoop - - export_keytab rm/rm rm - export_keytab nm/nm nm - export_keytab jhs/jhs jhs - - - - chmod 755 /etc/security/keytabs/*.keytab - chown 1000. 
/etc/security/keytabs/*.keytab - exit 0 -fi - -TESTKRB5_IMAGE=$(mvn -f "$SCRIPT_DIR"/../../../pom.xml help:evaluate -Dexpression=docker.ozone-testkr5b.image -q -DforceStdout -Dscan=false) - -docker run -it --entrypoint=/etc/security/keytabs/update-keytabs.sh -v "$SCRIPT_DIR":/etc/security/keytabs $TESTKRB5_IMAGE internal - - diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index c28483c6735..b291afc568a 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -300,6 +300,7 @@ Apache License 2.0 com.nimbusds:nimbus-jose-jwt com.squareup.okhttp3:okhttp com.squareup.okio:okio + com.squareup.okio:okio-jvm commons-beanutils:commons-beanutils commons-cli:commons-cli commons-codec:commons-codec @@ -314,6 +315,7 @@ Apache License 2.0 commons-validator:commons-validator commons-fileupload:commons-fileupload info.picocli:picocli + info.picocli:picocli-shell-jline3 io.dropwizard.metrics:metrics-core io.grpc:grpc-api io.grpc:grpc-context @@ -407,6 +409,7 @@ Apache License 2.0 org.apache.ratis:ratis-proto org.apache.ratis:ratis-server org.apache.ratis:ratis-server-api + org.apache.ratis:ratis-shell org.apache.ratis:ratis-thirdparty-misc org.apache.ratis:ratis-tools org.apache.thrift:libthrift @@ -430,6 +433,8 @@ Apache License 2.0 org.jetbrains:annotations org.jetbrains.kotlin:kotlin-stdlib org.jetbrains.kotlin:kotlin-stdlib-common + org.jetbrains.kotlin:kotlin-stdlib-jdk7 + org.jetbrains.kotlin:kotlin-stdlib-jdk8 org.jboss.weld.servlet:weld-servlet-shaded org.jheaps:jheaps org.jooq:jooq @@ -475,6 +480,7 @@ BSD 3-Clause com.google.re2j:re2j com.jcraft:jsch com.thoughtworks.paranamer:paranamer + org.jline:jline3 org.ow2.asm:asm org.ow2.asm:asm-analysis org.ow2.asm:asm-commons diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 042c9380e4a..be48c1d1fe2 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -63,7 +63,7 @@ share/ozone/lib/hadoop-common.jar share/ozone/lib/hadoop-hdfs-client.jar share/ozone/lib/hadoop-hdfs.jar share/ozone/lib/hadoop-shaded-guava.jar -share/ozone/lib/hadoop-shaded-protobuf_3_7.jar +share/ozone/lib/hadoop-shaded-protobuf_3_25.jar share/ozone/lib/hdds-annotation-processing.jar share/ozone/lib/hdds-client.jar share/ozone/lib/hdds-common.jar @@ -134,6 +134,7 @@ share/ozone/lib/jersey-hk2.jar share/ozone/lib/jersey-media-jaxb.jar share/ozone/lib/jersey-media-json-jackson.jar share/ozone/lib/jersey-server.jar +share/ozone/lib/jettison.jar share/ozone/lib/jetty-client.jar share/ozone/lib/jetty-http.jar share/ozone/lib/jetty-io.jar @@ -150,6 +151,7 @@ share/ozone/lib/jgrapht-core.jar share/ozone/lib/jgrapht-ext.jar share/ozone/lib/jgraphx.jar share/ozone/lib/jheaps.jar +share/ozone/lib/jline.jar share/ozone/lib/jmespath-java.jar share/ozone/lib/jna.jar share/ozone/lib/jna-platform.jar @@ -202,6 +204,7 @@ share/ozone/lib/netty-tcnative-classes.Final.jar share/ozone/lib/netty-transport.Final.jar share/ozone/lib/netty-transport-classes-epoll.Final.jar share/ozone/lib/netty-transport-native-epoll.Final-linux-x86_64.jar +share/ozone/lib/netty-transport-native-epoll.Final.jar share/ozone/lib/netty-transport-native-unix-common.Final.jar share/ozone/lib/nimbus-jose-jwt.jar share/ozone/lib/okhttp.jar @@ -234,6 +237,7 @@ share/ozone/lib/ozone-s3gateway.jar share/ozone/lib/ozone-tools.jar share/ozone/lib/perfmark-api.jar 
share/ozone/lib/picocli.jar +share/ozone/lib/picocli-shell-jline3.jar share/ozone/lib/protobuf-java.jar share/ozone/lib/protobuf-java.jar share/ozone/lib/protobuf-java-util.jar @@ -252,6 +256,7 @@ share/ozone/lib/ratis-netty.jar share/ozone/lib/ratis-proto.jar share/ozone/lib/ratis-server-api.jar share/ozone/lib/ratis-server.jar +share/ozone/lib/ratis-shell.jar share/ozone/lib/ratis-thirdparty-misc.jar share/ozone/lib/ratis-tools.jar share/ozone/lib/re2j.jar diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md b/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md index 2581412d320..c473ed2fe16 100644 --- a/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md +++ b/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md @@ -14,8 +14,8 @@ # Important -The files from this directory are not copied by automatically to the source distribution package. +The files from this directory are copied automatically to the source distribution package +via the `hadoop-ozone/dist/src/main/assemblies/ozone-src.xml` file. If you add any of the files to here, - * please also adjust `hadoop-ozone/dist/src/main/assemblies/ozone-src.xml` file. - * and copy the dependency to ../../bin/licenses (if it's included in the bin tar) \ No newline at end of file + * copy the dependency to ../../bin/licenses (if it's included in the bin tar) \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot index fae08991781..e618ebd063d 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -101,6 +101,18 @@ Report containers as JSON Should contain ${output} stats Should contain ${output} samples +List all containers + ${output} = Execute ozone admin container list --all + Should contain ${output} OPEN + +List all containers according to count (batchSize) + ${output} = Execute ozone admin container list --all --count 10 + Should contain ${output} OPEN + +List all containers from a particular container ID + ${output} = Execute ozone admin container list --all --start 1 + Should contain ${output} OPEN + #List containers on unknown host # ${output} = Execute And Ignore Error ozone admin --verbose container list --scm unknown-host # Should contain ${output} Invalid host name diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot index 29727548561..f1628939451 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot @@ -30,4 +30,8 @@ Run scm roles List scm roles as JSON ${output} = Execute ozone admin scm roles --json ${leader} = Execute echo '${output}' | jq -r '.[] | select(.raftPeerRole == "LEADER")' - Should Not Be Equal ${leader} ${EMPTY} \ No newline at end of file + Should Not Be Equal ${leader} ${EMPTY} + +List scm roles as TABLE + ${output} = Execute ozone admin scm roles --table + Should Match Regexp ${output} \\|.*LEADER.* \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot index 4299afe5f2d..641bc1462bb 100644 --- a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot +++ b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot @@ -16,6 +16,7 @@ *** Settings *** Documentation Smoketest 
ozone cluster startup Library OperatingSystem +Library String Library Collections Resource ../commonlib.robot Resource ../ozone-lib/shell.robot @@ -35,7 +36,7 @@ Prepare For Tests Execute dd if=/dev/urandom of=/tmp/100mb bs=1048576 count=100 Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab Execute ozone sh volume create /${VOLUME} - Execute ozone sh bucket create /${VOLUME}/${BUCKET} + Execute ozone sh bucket create --replication ${REPLICATION} --type ${TYPE} /${VOLUME}/${BUCKET} Datanode In Maintenance Mode @@ -61,28 +62,36 @@ Datanode Recommission is Finished Should Not Contain ${result} ENTERING_MAINTENANCE Run Container Balancer - ${result} = Execute ozone admin containerbalancer start -t 1 -d 100 -i 1 + ${result} = Execute ozone admin containerbalancer start -t 0.1 -d 100 -i 3 Should Contain ${result} Container Balancer started successfully. Wait Finish Of Balancing ${result} = Execute ozone admin containerbalancer status - Should Contain ${result} ContainerBalancer is Running. - Wait Until Keyword Succeeds 3min 10sec ContainerBalancer is Not Running - Sleep 60000ms + Wait Until Keyword Succeeds 4min 10sec ContainerBalancer is Not Running -Verify Verbose Balancer Status - [arguments] ${output} + Sleep 60000ms +Verify Balancer Iteration + [arguments] ${output} ${number} Should Contain ${output} ContainerBalancer is Running. Should Contain ${output} Started at: Should Contain ${output} Container Balancer Configuration values: - -Verify Balancer Iteration - [arguments] ${output} ${number} ${status} ${containers} - - Should Contain ${output} Iteration number ${number} - Should Contain ${output} Iteration result ${status} - Should Contain ${output} Scheduled to move containers ${containers} + Should Contain ${output} Iteration number ${number} collapse_spaces=True + Should Contain ${output} Scheduled to move containers collapse_spaces=True + Should Contain ${output} Balancing duration: + Should Contain ${output} Iteration duration + Should Contain ${output} Current iteration info: + +Verify Balancer Iteration History + [arguments] ${output} + Should Contain ${output} Iteration history list: + Should Contain X Times ${output} Size scheduled to move 1 collapse_spaces=True + Should Contain X Times ${output} Moved data size 1 collapse_spaces=True + Should Contain X Times ${output} Scheduled to move containers 1 collapse_spaces=True + Should Contain X Times ${output} Already moved containers 1 collapse_spaces=True + Should Contain X Times ${output} Failed to move containers 0 1 collapse_spaces=True + Should Contain X Times ${output} Failed to move containers by timeout 0 1 collapse_spaces=True + Should Contain ${output} Iteration result ITERATION_COMPLETED collapse_spaces=True Run Balancer Status ${result} = Execute ozone admin containerbalancer status @@ -90,15 +99,14 @@ Run Balancer Status Run Balancer Verbose Status ${result} = Execute ozone admin containerbalancer status -v - Verify Verbose Balancer Status ${result} - Verify Balancer Iteration ${result} 1 IN_PROGRESS 3 - Should Contain ${result} Current iteration info: + Verify Balancer Iteration ${result} 1 + Should Contain ${result} Iteration result - collapse_spaces=True + Run Balancer Verbose History Status ${result} = Execute ozone admin containerbalancer status -v --history - Verify Verbose Balancer Status ${result} - Verify Balancer Iteration ${result} 1 IN_PROGRESS 3 - Should Contain ${result} Iteration history list: + Verify Balancer Iteration ${result} 1 + Verify Balancer Iteration History 
${result} ContainerBalancer is Not Running ${result} = Execute ozone admin containerbalancer status @@ -111,7 +119,7 @@ Create Multiple Keys ${fileName} = Set Variable file-${INDEX}.txt ${key} = Set Variable /${VOLUME}/${BUCKET}/${fileName} LOG ${fileName} - Create Key ${key} ${file} + Create Key ${key} ${file} --replication=${REPLICATION} --type=${TYPE} Key Should Match Local File ${key} ${file} END @@ -126,14 +134,14 @@ Get Uuid Close All Containers FOR ${INDEX} IN RANGE 15 - ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1 + ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.data == 3) | .containerID' | head -1 EXIT FOR LOOP IF "${container}" == "${EMPTY}" ${message} = Execute And Ignore Error ozone admin container close "${container}" Run Keyword If '${message}' != '${EMPTY}' Should Contain ${message} is in closing state ${output} = Execute ozone admin container info "${container}" Should contain ${output} CLOS END - Wait until keyword succeeds 3min 10sec All container is closed + Wait until keyword succeeds 4min 10sec All container is closed All container is closed ${output} = Execute ozone admin container list --state OPEN @@ -146,7 +154,7 @@ Get Datanode Ozone Used Bytes Info [return] ${result} ** Test Cases *** -Verify Container Balancer for RATIS containers +Verify Container Balancer for RATIS/EC containers Prepare For Tests Datanode In Maintenance Mode @@ -169,16 +177,13 @@ Verify Container Balancer for RATIS containers Run Balancer Verbose Status - Run Balancer Verbose History Status + Wait Until Keyword Succeeds 40sec 5sec Run Balancer Verbose History Status Wait Finish Of Balancing ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} = Get Datanode Ozone Used Bytes Info ${uuid} Should Not Be Equal As Integers ${datanodeOzoneUsedBytesInfo} ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} - Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} < ${SIZE} * 3.5 - Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * 3 - - - - - + #We need to ensure that after balancing, the amount of data recorded on each datanode falls within the following ranges: + #{SIZE}*3 < used < {SIZE}*3.5 for RATIS containers, and {SIZE}*0.7 < used < {SIZE}*1.5 for EC containers. 
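To make the bounds in the comment above concrete: with a hypothetical SIZE of 100 MB, the RATIS case accepts any per-datanode usage strictly between 300 MB and 350 MB, and the EC case between 70 MB and 150 MB. The bash sketch below mirrors the checks that follow, with made-up numbers; the real values come from the suite's SIZE, LOWER_LIMIT and UPPER_LIMIT variables:

    # Hypothetical numbers only: SIZE = 100 MB, RATIS limits 3.0 and 3.5.
    SIZE=$((100 * 1024 * 1024))
    LOWER_LIMIT=3.0
    UPPER_LIMIT=3.5
    used=335544320   # stand-in for the value returned by "Get Datanode Ozone Used Bytes Info"
    awk -v u="$used" -v s="$SIZE" -v lo="$LOWER_LIMIT" -v hi="$UPPER_LIMIT" \
        'BEGIN { exit !(u > s*lo && u < s*hi) }' && echo "usage within the expected range"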
+ Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} < ${SIZE} * ${UPPER_LIMIT} + Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * ${LOWER_LIMIT} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot index dc862d59c1a..c3caec2ae91 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot @@ -25,3 +25,8 @@ Picks up command line options ${processes} = List All Processes Should Contain ${processes} %{HDFS_OM_OPTS} Should Contain ${processes} %{HADOOP_OPTS} + +Rejects Atomic Key Rewrite + Execute ozone freon ockg -n1 -t1 -p rewrite + ${output} = Execute and check rc ozone sh key rewrite -t EC -r rs-3-2-1024k /vol1/bucket1/rewrite/0 255 + Should Contain ${output} Feature disabled: ATOMIC_REWRITE_KEY diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot index 511679c56f4..9061677eae6 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot @@ -18,19 +18,140 @@ Documentation Read Compatibility Resource ../ozone-lib/shell.robot Resource setup.robot Test Timeout 5 minutes -Suite Setup Create Local Test File -*** Variables *** -${SUFFIX} ${EMPTY} + +*** Keywords *** +Key List With Replication + [arguments] ${args} + ${list} = Execute ozone sh key list ${args} + ${result} = Execute echo '${list}' | jq -r '[.name, .replicationType, (.replicationFactor | tostring)] | join (" ")' + [return] ${result} + *** Test Cases *** +Buckets Can Be Listed + ${result} = Execute ozone sh bucket list /vol1 + Should Contain ${result} bucket1 + + IF '${CLUSTER_VERSION}' >= '${EC_VERSION}' + Should Contain ${result} ratis-${CLUSTER_VERSION} + Should Contain ${result} ecbucket-${CLUSTER_VERSION} + END + +Bucket Without Replication Config + Verify Bucket Empty Replication Config /vol1/bucket1 + +Bucket With Replication Config + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' + Verify Bucket Replica Replication Config /vol1/ratis-${CLUSTER_VERSION} RATIS THREE + Verify Bucket EC Replication Config /vol1/ecbucket-${CLUSTER_VERSION} RS 3 2 1048576 + ELSE + Verify Bucket Empty Replication Config /vol1/ratis-${CLUSTER_VERSION} + Verify Bucket Empty Replication Config /vol1/ecbucket-${CLUSTER_VERSION} + END + Key Can Be Read - Key Should Match Local File /vol1/bucket1/key-${SUFFIX} ${TESTFILE} + Key Should Match Local File /vol1/bucket1/key-${DATA_VERSION} ${TESTFILE} + +Encrypted Key Can Be Read + Key Should Match Local File /vol1/encrypted-${DATA_VERSION}/key ${TESTFILE} + File Should Match Local File ofs://om/vol1/encrypted-${DATA_VERSION}/key ${TESTFILE} + +Key Read From Bucket With Replication + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + Key Should Match Local File /vol1/ratis-${CLUSTER_VERSION}/key-${DATA_VERSION} ${TESTFILE} + + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' or '${DATA_VERSION}' == '${CLIENT_VERSION}' + Key Should Match Local File /vol1/ecbucket-${CLUSTER_VERSION}/key-${DATA_VERSION} ${TESTFILE} + ELSE + Assert Unsupported ozone sh key get -f /vol1/ecbucket-${CLUSTER_VERSION}/key-${DATA_VERSION} /dev/null + END Dir Can Be Listed - Execute ozone fs -ls 
o3fs://bucket1.vol1/dir-${SUFFIX} + ${result} = Execute ozone fs -ls o3fs://bucket1.vol1/dir-${DATA_VERSION} + Should contain ${result} dir-${DATA_VERSION}/file-${DATA_VERSION} + + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + +# TODO HDDS-11803 +# ${result} = Execute ozone fs -ls ofs://om/vol1/ +# Should contain ${result} /vol1/ratis-${CLUSTER_VERSION} +# Should contain ${result} /vol1/ecbucket-${CLUSTER_VERSION} + + IF '${CLIENT_VERSION}' < '${EC_VERSION}' + ${result} = Execute and checkrc ozone fs -ls ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/ 1 + Should contain ${result} ls: The list of keys contains keys with Erasure Coded replication set + END + +File Can Be Listed + ${result} = Execute ozone fs -ls o3fs://bucket1.vol1/dir-${DATA_VERSION}/file-${DATA_VERSION} + Should contain ${result} dir-${DATA_VERSION}/file-${DATA_VERSION} + + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + ${result} = Execute ozone fs -ls ofs://om/vol1/ratis-${CLUSTER_VERSION}/file-${DATA_VERSION} + Should contain ${result} /vol1/ratis-${CLUSTER_VERSION}/file-${DATA_VERSION} + + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' or '${DATA_VERSION}' == '${CLIENT_VERSION}' + ${result} = Execute ozone fs -ls ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/file-${DATA_VERSION} + Should contain ${result} /vol1/ecbucket-${CLUSTER_VERSION}/file-${DATA_VERSION} + ELSE + ${result} = Execute and checkrc ozone fs -ls ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/file-${DATA_VERSION} 1 + Should contain ${result} : No such file or directory + END + +Key List + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' + ${result} = Execute ozone sh key list /vol1/bucket1 + Should Contain ${result} key-${DATA_VERSION} + ELSE IF '${DATA_VERSION}' < '${EC_VERSION}' # New client creates RATIS/ONE key by default: BUG? 
+ ${result} = Key List With Replication /vol1/bucket1 + Should contain ${result} key-${DATA_VERSION} RATIS 3 + END + +Key List In Bucket With Replication + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + IF '${CLIENT_VERSION}' < '${EC_VERSION}' + ${result} = Key List With Replication /vol1/ratis-${CLUSTER_VERSION}/ + Should contain ${result} key-${DATA_VERSION} RATIS 3 + + Assert Unsupported ozone sh key list /vol1/ecbucket-${CLUSTER_VERSION}/ + ELSE + ${result} = Execute ozone sh key list /vol1/ratis-${CLUSTER_VERSION} + Should Contain ${result} key-${DATA_VERSION} + ${result} = Execute ozone sh key list /vol1/ecbucket-${CLUSTER_VERSION} + Should Contain ${result} key-${DATA_VERSION} + END + File Can Be Get - Execute ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} /tmp/ - Execute diff -q ${TESTFILE} /tmp/file-${SUFFIX} - [teardown] Execute rm /tmp/file-${SUFFIX} + Key Should Match Local File /vol1/bucket1/dir-${DATA_VERSION}/file-${DATA_VERSION} ${TESTFILE} + File Should Match Local File o3fs://bucket1.vol1/dir-${DATA_VERSION}/file-${DATA_VERSION} ${TESTFILE} + +File Can Be Get From Bucket With Replication + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + File Should Match Local File ofs://om/vol1/ratis-${CLUSTER_VERSION}/file-${DATA_VERSION} ${TESTFILE} + + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' or '${DATA_VERSION}' == '${CLIENT_VERSION}' + File Should Match Local File ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/key-${DATA_VERSION} ${TESTFILE} + ELSE + ${result} = Execute and checkrc ozone fs -get ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/key-${DATA_VERSION} 1 + Should contain ${result} : No such file or directory + END + +FSO Bucket Can Be Read + Pass Execution If '${DATA_VERSION}' < '${FSO_VERSION}' Skipped write test case + Pass Execution If '${CLIENT_VERSION}' < '${FSO_VERSION}' Client does not support FSO + Pass Execution If '${CLUSTER_VERSION}' < '${FSO_VERSION}' Cluster does not support FSO + File Should Match Local File ofs://om/vol1/fso-bucket-${DATA_VERSION}/dir/subdir/file ${TESTFILE} + +HSync Lease Recover Can Be Used + Pass Execution If '${DATA_VERSION}' < '${FSO_VERSION}' Skipped write test case + Pass Execution If '${CLIENT_VERSION}' < '${HSYNC_VERSION}' Client does not support HSYNC + Pass Execution If '${CLUSTER_VERSION}' < '${HSYNC_VERSION}' Cluster does not support HSYNC + Execute ozone debug recover --path=ofs://om/vol1/fso-bucket-${DATA_VERSION}/dir/subdir/file diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot index ae765f23e2b..4f41d280a6c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot @@ -19,10 +19,7 @@ Library OperatingSystem Resource ../ozone-lib/shell.robot *** Variables *** -${SUFFIX} ${EMPTY} - - -*** Keywords *** -Create Local Test File - Set Suite Variable ${TESTFILE} /tmp/test-data-${SUFFIX}.txt - Create File ${TESTFILE} Compatibility Test +${EC_VERSION} 1.3.0 +${FSO_VERSION} 1.3.0 +${HSYNC_VERSION} 2.0.0 +${TESTFILE} ${TEST_DATA_DIR}/small diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot index 4c611d4287b..0497edaca16 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot @@ -17,19 +17,65 @@ 
Documentation Write Compatibility Resource ../ozone-lib/shell.robot Resource setup.robot +Resource ../lib/fs.robot +Resource ../ozone-lib/freon.robot Test Timeout 5 minutes -Suite Setup Create Local Test File *** Variables *** -${SUFFIX} ${EMPTY} +${ENCRYPTION_KEY} key1 *** Test Cases *** +Create Bucket With Replication Type + Pass Execution If '${CLIENT_VERSION}' < '${EC_VERSION}' Client does not support EC + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + Execute ozone sh bucket create --replication 3 --type RATIS /vol1/ratis-${CLIENT_VERSION} + Execute ozone sh bucket create --replication rs-3-2-1024k --type EC /vol1/ecbucket-${CLIENT_VERSION} + +Create Encrypted Bucket + Execute ozone sh bucket create -k ${ENCRYPTION_KEY} /vol1/encrypted-${CLIENT_VERSION} + +Create Key in Encrypted Bucket + Execute ozone sh key put /vol1/encrypted-${CLIENT_VERSION}/key ${TESTFILE} + Key Can Be Written - Create Key /vol1/bucket1/key-${SUFFIX} ${TESTFILE} + Create Key /vol1/bucket1/key-${CLIENT_VERSION} ${TESTFILE} + +Key Can Be Written To Bucket With Replication Type + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + Execute ozone sh key put /vol1/ratis-${CLUSTER_VERSION}/key-${CLIENT_VERSION} ${TESTFILE} + Execute ozone sh key put /vol1/ecbucket-${CLUSTER_VERSION}/key-${CLIENT_VERSION} ${TESTFILE} + +Key Can Be Deleted + Create Key /vol1/bucket1/to-be-deleted-${CLIENT_VERSION} ${TESTFILE} + Execute ozone sh key delete /vol1/bucket1/to-be-deleted-${CLIENT_VERSION} Dir Can Be Created - Execute ozone fs -mkdir o3fs://bucket1.vol1/dir-${SUFFIX} + Execute ozone fs -mkdir o3fs://bucket1.vol1/dir-${CLIENT_VERSION} File Can Be Put - Execute ozone fs -put ${TESTFILE} o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} + Execute ozone fs -put ${TESTFILE} o3fs://bucket1.vol1/dir-${CLIENT_VERSION}/file-${CLIENT_VERSION} + +File Can Be Put To Bucket With Replication Type + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + Execute ozone fs -put ${TESTFILE} ofs://om/vol1/ratis-${CLUSTER_VERSION}/file-${CLIENT_VERSION} + Execute ozone fs -put ${TESTFILE} ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/file-${CLIENT_VERSION} + +File Can Be Deleted + Execute ozone fs -put ${TESTFILE} o3fs://bucket1.vol1/dir-${CLIENT_VERSION}/to-be-deleted + Execute ozone fs -rm -skipTrash o3fs://bucket1.vol1/dir-${CLIENT_VERSION}/to-be-deleted + +FSO Bucket Can Be Created and Used + Pass Execution If '${CLIENT_VERSION}' < '${FSO_VERSION}' Client does not support FSO + Pass Execution If '${CLUSTER_VERSION}' < '${FSO_VERSION}' Cluster does not support FSO + Execute ozone sh bucket create --layout FILE_SYSTEM_OPTIMIZED /vol1/fso-bucket-${CLIENT_VERSION} + Execute ozone fs -mkdir -p ofs://om/vol1/fso-bucket-${CLIENT_VERSION}/dir/subdir + Execute ozone fs -put ${TESTFILE} ofs://om/vol1/fso-bucket-${CLIENT_VERSION}/dir/subdir/file + +HSync Can Be Used To Create Keys + Pass Execution If '${CLIENT_VERSION}' < '${HSYNC_VERSION}' Client does not support HSYNC + Pass Execution If '${CLUSTER_VERSION}' < '${HSYNC_VERSION}' Cluster does not support HSYNC + ${o3fspath} = Format FS URL o3fs vol1 bucket1 + Freon DFSG sync=HSYNC n=1 path=${o3fspath} + ${pfspath} = Format FS URL ofs $vol1 bucket1 + Freon DFSG sync=HSYNC n=1 path=${pfspath} diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot new file mode 100644 index 00000000000..0fa43dee6c0 --- /dev/null +++ 
b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot @@ -0,0 +1,146 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test ozone debug ldb CLI +Library OperatingSystem +Resource ../lib/os.robot +Test Timeout 5 minute +Suite Setup Write keys + +*** Variables *** +${PREFIX} ${EMPTY} +${VOLUME} cli-debug-volume${PREFIX} +${BUCKET} cli-debug-bucket +${DEBUGKEY} debugKey +${TESTFILE} testfile + +*** Keywords *** +Write keys + Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Execute ozone sh volume create ${VOLUME} + Execute ozone sh bucket create ${VOLUME}/${BUCKET} -l OBJECT_STORE + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}1 bs=100 count=10 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE}1 + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}2 bs=100 count=15 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}2 ${TEMP_DIR}/${TESTFILE}2 + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}3 bs=100 count=20 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}3 ${TEMP_DIR}/${TESTFILE}3 + Execute ozone sh key addacl -a user:systest:a ${VOLUME}/${BUCKET}/${TESTFILE}3 + +*** Test Cases *** +Test ozone debug ldb ls + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db ls + Should contain ${output} keyTable + +Test ozone debug ldb scan + # test count option + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --count + Should Not Be Equal ${output} 0 + # test valid json for scan command + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable | jq -r '.' 
+ Should contain ${output} keyName + Should contain ${output} testfile1 + Should contain ${output} testfile2 + Should contain ${output} testfile3 + # test key is included with --with-keys + ${output1} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable | jq '."\/cli-debug-volume\/cli-debug-bucket\/testfile1"' + ${output2} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --with-keys | jq '."\/cli-debug-volume\/cli-debug-bucket\/testfile1"' + ${output3} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --with-keys=true | jq '."\/cli-debug-volume\/cli-debug-bucket\/testfile1"' + Should contain ${output1} testfile1 + Should Be Equal ${output1} ${output2} + Should Be Equal ${output1} ${output3} + # test key is ommitted with --with-keys set to false + ${output} = Execute and Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --with-keys=false | jq '."\/cli-debug-volume\/cli-debug-bucket\/testfile1"' + Should contain ${output} Cannot index array with string + # test startkey option + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --startkey="/cli-debug-volume/cli-debug-bucket/testfile2" + Should not contain ${output} testfile1 + Should contain ${output} testfile2 + Should contain ${output} testfile3 + # test endkey option + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --endkey="/cli-debug-volume/cli-debug-bucket/testfile2" + Should contain ${output} testfile1 + Should contain ${output} testfile2 + Should not contain ${output} testfile3 + # test fields option + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --fields="volumeName,bucketName,keyName" + Should contain ${output} volumeName + Should contain ${output} bucketName + Should contain ${output} keyName + Should not contain ${output} objectID + Should not contain ${output} dataSize + Should not contain ${output} keyLocationVersions + +Test ozone debug ldb scan with filter option success + # test filter option with one filter + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile2" + Should not contain ${output} testfile1 + Should contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option with one multi-level filter + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="acls.name:equals:systest" + Should not contain ${output} testfile1 + Should not contain ${output} testfile2 + Should contain ${output} testfile3 + # test filter option with multiple filter + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile3,acls.name:equals:systest" + Should not contain ${output} testfile1 + Should not contain ${output} testfile2 + Should contain ${output} testfile3 + # test filter option with no records match both filters + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="acls.name:equals:systest,keyName:equals:testfile2" + Should not contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option for size > 1200 + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:greater:1200" + Should not contain ${output} testfile1 + Should contain ${output} testfile2 + Should contain ${output} testfile3 + # test filter option for size < 
1200 +    ${output} =        Execute    ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:lesser:1200" +                    Should contain    ${output}    testfile1 +                    Should not contain    ${output}    testfile2 +                    Should not contain    ${output}    testfile3 +    # test filter option where no records match both filters +    ${output} =        Execute    ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:lesser:1200,keyName:equals:testfile2" +                    Should not contain    ${output}    testfile1 +                    Should not contain    ${output}    testfile2 +                    Should not contain    ${output}    testfile3 +    # test filter option with regex matching numbers +    ${output} =        Execute    ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:regex:^1[0-2]{3}$" +                    Should contain    ${output}    testfile1 +                    Should not contain    ${output}    testfile2 +                    Should not contain    ${output}    testfile3 +    # test filter option with regex matching string +    ${output} =        Execute    ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:regex:^test.*[0-1]$" +                    Should contain    ${output}    testfile1 +                    Should not contain    ${output}    testfile2 +                    Should not contain    ${output}    testfile3 + +Test ozone debug ldb scan with filter option failure +    # test filter option with invalid operator +    ${output} =        Execute    ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:lesserthan:1200" +                    Should contain    ${output}    Error: Invalid operator +    # test filter option with invalid format +    ${output} =        Execute And Ignore Error    ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:1200" +                    Should contain    ${output}    Error: Invalid format +    # test filter option with invalid field +    ${output} =        Execute And Ignore Error    ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="size:equals:1200" +                    Should contain    ${output}    Error: Invalid field +    # test filter option for lesser/greater operator on non-numeric field +    ${output} =        Execute And Ignore Error    ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:lesser:k1" +                    Should contain    ${output}    only on numeric values diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot index f867ee99f64..691769dbd72 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot @@ -17,11 +17,13 @@ Documentation    Test lease recovery of ozone filesystem Library    OperatingSystem Resource    ../lib/os.robot +Resource    ../lib/fs.robot Resource    ozone-debug.robot Test Timeout    5 minute Suite Setup    Create volume bucket and put key *** Variables *** +${OM_SERVICE_ID}    %{OM_SERVICE_ID} ${VOLUME}    lease-recovery-volume ${BUCKET}    lease-recovery-bucket ${TESTFILE}    testfile22 @@ -35,13 +37,17 @@ Create volume bucket and put key *** Test Cases *** Test ozone debug recover for o3fs -    ${result} =    Execute Lease recovery cli    o3fs://${BUCKET}.${VOLUME}.om/${TESTFILE} -    Should Contain    ${result}    Lease recovery SUCCEEDED -    ${result} =    Execute Lease recovery cli    o3fs://${BUCKET}.${VOLUME}.om/randomfile -    Should Contain    ${result}    not found +    ${o3fs_path} =    Format FS URL    o3fs    ${VOLUME}    ${BUCKET}    ${TESTFILE} +    ${result} =       Execute Lease recovery cli    ${o3fs_path} +    Should Contain    ${result}    Lease recovery SUCCEEDED +    ${o3fs_path} =    Format FS URL    o3fs    ${VOLUME}    ${BUCKET}    randomfile +    ${result} =       Execute Lease recovery cli    ${o3fs_path} +    Should Contain    ${result}    not found Test ozone 
debug recover for ofs - ${result} = Execute Lease recovery cli ofs://om/${VOLUME}/${BUCKET}/${TESTFILE} - Should Contain ${result} Lease recovery SUCCEEDED - ${result} = Execute Lease recovery cli ofs://om/${VOLUME}/${BUCKET}/randomfile - Should Contain ${result} not found + ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} ${TESTFILE} + ${result} = Execute Lease recovery cli ${ofs_path} + Should Contain ${result} Lease recovery SUCCEEDED + ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} randomfile + ${result} = Execute Lease recovery cli ${ofs_path} + Should Contain ${result} not found diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot index ca1995bf3a1..4e013e2a64b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot @@ -49,3 +49,8 @@ Test ozone debug read-replicas FOR ${replica} IN RANGE 3 Verify Healthy Replica ${json} ${replica} ${md5sum} END + + +Test ozone debug version + ${output} = Execute ozone debug version + Execute echo '${output}' | jq -r '.' # validate JSON diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot b/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot deleted file mode 100644 index d64c7686870..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -*** Settings *** -Documentation Test EC backward compatibility -Library OperatingSystem -Resource lib.resource - -*** Variables *** -${PREFIX} ${EMPTY} -${VOLUME} vol${PREFIX} - -*** Test Cases *** -Setup Cluster Data - [Tags] setup-ec-data - Prepare Data For Xcompat Tests - -Test Read Key Compat - [Tags] test-ec-compat - Key Should Match Local File /${VOLUME}/ratis/3mb /tmp/3mb - Key Should Match Local File /${VOLUME}/default/3mb /tmp/3mb - - ${result} = Execute and checkrc ozone sh key get -f /${VOLUME}/ecbucket/3mb /dev/null 255 - Should Contain ${result} NOT_SUPPORTED_OPERATION - -Test Listing Compat - [Tags] test-ec-compat - ${result} = Execute ozone sh volume list | jq -r '.name' - Should contain ${result} ${VOLUME} - ${result} = Execute ozone sh bucket list /${VOLUME}/ | jq -r '.name' - Should contain ${result} default - Should contain ${result} ratis - Should contain ${result} ec - ${result} = Execute ozone sh key list /${VOLUME}/default/ | jq -r '[.name, .replicationType, (.replicationFactor | tostring)] | join (" ")' - Should contain ${result} 3mb RATIS 3 - ${result} = Execute ozone sh key list /${VOLUME}/ratis/ | jq -r '[.name, .replicationType, (.replicationFactor | tostring)] | join (" ")' - Should contain ${result} 3mb RATIS 3 - - ${result} = Execute and checkrc ozone sh key list /${VOLUME}/ecbucket/ 255 - Should contain ${result} NOT_SUPPORTED_OPERATION - -Test Info Compat - [Tags] test-ec-compat - ${result} = Execute ozone sh volume info ${VOLUME} | jq -r '.name' - Should contain ${result} ${VOLUME} - ${result} = Execute ozone sh bucket info /${VOLUME}/default | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")' - Should contain ${result} default # there is no replication config in the old client for bucket info - ${result} = Execute ozone sh bucket info /${VOLUME}/ratis | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")' - Should contain ${result} ratis # there is no replication config in the old client for bucket info - ${result} = Execute ozone sh bucket info /${VOLUME}/ecbucket | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")' - Should contain ${result} ec # there is no replication config in the old client for bucket info - -Test FS Compat - [Tags] test-ec-compat - ${result} = Execute ozone fs -ls ofs://om/ - Should contain ${result} /${VOLUME} - ${result} = Execute ozone fs -ls ofs://om/${VOLUME}/ - Should contain ${result} /${VOLUME}/default - Should contain ${result} /${VOLUME}/ratis - Should contain ${result} /${VOLUME}/ecbucket - ${result} = Execute ozone fs -ls ofs://om/${VOLUME}/default/3mb - Should contain ${result} /${VOLUME}/default/3mb - ${result} = Execute ozone fs -ls ofs://om/${VOLUME}/ratis/3mb - Should contain ${result} /${VOLUME}/ratis/3mb - - ${result} = Execute and checkrc ozone fs -ls ofs://om/${VOLUME}/ecbucket/ 1 - Should contain ${result} ls: The list of keys contains keys with Erasure Coded replication set - ${result} = Execute and checkrc ozone fs -ls ofs://om/${VOLUME}/ecbucket/3mb 1 - Should contain ${result} : No such file or directory - ${result} = Execute and checkrc ozone fs -get ofs://om/${VOLUME}/ecbucket/3mb 1 - Should contain ${result} : No such file or directory - -Test FS Client Can Read Own Writes - [Tags] test-ec-compat - Execute ozone fs -put /tmp/1mb ofs://om/${VOLUME}/default/1mb - Execute ozone fs -put /tmp/1mb ofs://om/${VOLUME}/ratis/1mb - Execute ozone fs -put /tmp/1mb ofs://om/${VOLUME}/ecbucket/1mb - Key Should Match Local File /${VOLUME}/ratis/1mb /tmp/1mb - Key 
Should Match Local File /${VOLUME}/ratis/1mb /tmp/1mb - Key Should Match Local File /${VOLUME}/ratis/1mb /tmp/1mb - Execute ozone fs -rm -skipTrash ofs://om/${VOLUME}/default/1mb - Execute ozone fs -rm -skipTrash ofs://om/${VOLUME}/ratis/1mb - Execute ozone fs -rm -skipTrash ofs://om/${VOLUME}/ecbucket/1mb - -Test Client Can Read Own Writes - [Tags] test-ec-compat - Execute ozone sh key put /${VOLUME}/default/2mb /tmp/2mb - Execute ozone sh key put /${VOLUME}/ratis/2mb /tmp/2mb - Execute ozone sh key put /${VOLUME}/ecbucket/2mb /tmp/2mb - Key Should Match Local File /${VOLUME}/ratis/2mb /tmp/2mb - Key Should Match Local File /${VOLUME}/ratis/2mb /tmp/2mb - Key Should Match Local File /${VOLUME}/ratis/2mb /tmp/2mb - Execute ozone sh key delete /${VOLUME}/default/2mb - Execute ozone sh key delete /${VOLUME}/ratis/2mb - Execute ozone sh key delete /${VOLUME}/ecbucket/2mb diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource b/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource index f6a84f9e065..f01ec191f55 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource +++ b/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource @@ -21,8 +21,6 @@ Resource ../ozone-lib/shell.robot *** Variables *** ${SCM} scm -${PREFIX} ${EMPTY} -${VOLUME} vol${PREFIX} *** Keywords *** Prepare For Tests @@ -31,13 +29,3 @@ Prepare For Tests Execute dd if=/dev/urandom of=/tmp/3mb bs=1048576 count=3 Execute dd if=/dev/urandom of=/tmp/100mb bs=1048576 count=100 Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab - -# xcompat/test.sh creates unified test data files in /tmp for client containers -Prepare Data For Xcompat Tests - Execute ozone sh volume create /${VOLUME} - Execute ozone sh bucket create /${VOLUME}/default - Execute ozone sh bucket create --replication 3 --type RATIS /${VOLUME}/ratis - Execute ozone sh bucket create --replication rs-3-2-1024k --type EC /${VOLUME}/ecbucket - Execute ozone sh key put /${VOLUME}/default/3mb /tmp/3mb - Execute ozone sh key put /${VOLUME}/ratis/3mb /tmp/3mb - Execute ozone sh key put /${VOLUME}/ecbucket/3mb /tmp/3mb diff --git a/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot b/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot new file mode 100644 index 00000000000..c8462124427 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/freon/hsync.robot @@ -0,0 +1,51 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Test HSync via freon CLI. 
+Library OperatingSystem +Library String +Library BuiltIn +Resource ../ozone-lib/freon.robot +Resource ../lib/fs.robot +Test Timeout 10 minutes +Suite Setup Create volume and bucket + +*** Variables *** +${OM_SERVICE_ID} %{OM_SERVICE_ID} +${VOLUME} hsync-volume +${BUCKET} hsync-bucket + +*** Keywords *** +Create volume and bucket + Execute ozone sh volume create /${volume} + Execute ozone sh bucket create /${volume}/${bucket} + +*** Test Cases *** +Generate key for o3fs by HSYNC + ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET} + Freon DFSG sync=HSYNC path=${path} + +Generate key for o3fs by HFLUSH + ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET} + Freon DFSG sync=HFLUSH path=${path} + +Generate key for ofs by HSYNC + ${path} = Format FS URL ofs ${VOLUME} ${BUCKET} + Freon DFSG sync=HSYNC path=${path} + +Generate key for ofs by HFLUSH + ${path} = Format FS URL ofs ${VOLUME} ${BUCKET} + Freon DFSG sync=HFLUSH path=${path} diff --git a/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot b/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot new file mode 100644 index 00000000000..1250ad1344e --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/hsync/upgrade-hsync-check.robot @@ -0,0 +1,68 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test HSync during upgrade +Library OperatingSystem +Library String +Library BuiltIn +Resource ../commonlib.robot +Resource ../lib/fs.robot +Resource ../debug/ozone-debug.robot +Default Tags pre-finalized-hsync-tests +Suite Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + +*** Variables *** +${OM_SERVICE_ID} %{OM_SERVICE_ID} +${VOLUME} upgrade-hsync-volume +${BUCKET} upgrade-hsync-bucket +${KEY} upgrade-hsync-key + +*** Keywords *** +Create volume bucket and put key + Execute ozone sh volume create /${volume} + Execute ozone sh bucket create /${volume}/${bucket} + Execute ozone sh key put /${volume}/${bucket}/${key} /etc/hosts + +Freon DFSG + [arguments] ${prefix}=dfsg ${n}=1000 ${path}={EMPTY} ${sync}=HSYNC ${buffer}=1024 ${copy-buffer}=1024 ${size}=10240 + ${result} = Execute and checkrc ozone freon dfsg -n ${n} --sync ${sync} -s ${size} --path ${path} --buffer ${buffer} --copy-buffer ${copy-buffer} -p ${prefix} 255 + Should contain ${result} NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION + +*** Test Cases *** +Test HSync lease recover prior to finalization + Create volume bucket and put key + ${o3fs_path} = Format FS URL o3fs ${VOLUME} ${BUCKET} ${KEY} + ${result} = Execute and checkrc ozone debug recover --path=${o3fs_path} 255 + Should contain ${result} It belongs to the layout feature HBASE_SUPPORT, whose layout version is 7 + ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} ${KEY} + ${result} = Execute and checkrc ozone debug recover --path=${ofs_path} 255 + Should contain ${result} It belongs to the layout feature HBASE_SUPPORT, whose layout version is 7 + +Generate key for o3fs by HSYNC prior to finalization + ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET} + Freon DFSG sync=HSYNC path=${path} + +Generate key for o3fs by HFLUSH prior to finalization + ${path} = Format FS URL o3fs ${VOLUME} ${BUCKET} + Freon DFSG sync=HFLUSH path=${path} + +Generate key for ofs by HSYNC prior to finalization + ${path} = Format FS URL ofs ${VOLUME} ${BUCKET} + Freon DFSG sync=HSYNC path=${path} + +Generate key for ofs by HFLUSH prior to finalization + ${path} = Format FS URL ofs ${VOLUME} ${BUCKET} + Freon DFSG sync=HFLUSH path=${path} diff --git a/hadoop-ozone/dist/src/main/smoketest/om-ratis/testOMAdminCmd.robot b/hadoop-ozone/dist/src/main/smoketest/om-ratis/testOMAdminCmd.robot index 0c688865193..3977e053608 100644 --- a/hadoop-ozone/dist/src/main/smoketest/om-ratis/testOMAdminCmd.robot +++ b/hadoop-ozone/dist/src/main/smoketest/om-ratis/testOMAdminCmd.robot @@ -22,5 +22,5 @@ Test Timeout 5 minutes *** Test Cases *** Check om admin command - ${result} = Execute and checkrc ozone admin om roles -id=omServiceIdDefault 0 + ${result} = Execute and ignore error ozone admin om roles -id=omServiceIdDefault Should Contain ${result} This command works only on OzoneManager HA cluster. 
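Note: the suite-local Freon DFSG keyword above deliberately expects exit code 255 and the NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION message, while the shared keyword of the same name added to ozone-lib/freon.robot asserts successful executions instead. A minimal sketch of a post-finalization counterpart built on the shared keyword follows; the test name and the n=100 workload size are illustrative assumptions, not part of the patch.

# Sketch only: a post-finalization test case using the shared ozone-lib/freon.robot keyword
Generate key for ofs by HSYNC after finalization
    ${path} =    Format FS URL    ofs    ${VOLUME}    ${BUCKET}
    Freon DFSG    sync=HSYNC    path=${path}    n=100    # shared keyword expects "Successful executions: 100"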
diff --git a/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot b/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot index 54e44bce36b..3513ec12de1 100644 --- a/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot +++ b/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot @@ -28,6 +28,9 @@ Assert Leader Present in JSON [Arguments] ${output} ${leader} = Execute echo '${output}' | jq '.[] | select(.[] | .serverRole == "LEADER")' Should Not Be Equal ${leader} ${EMPTY} +Assert Leader Present in TABLE + [Arguments] ${output} + Should Match Regexp ${output} \\|.*LEADER.* *** Test Cases *** List om roles with OM service ID passed @@ -53,3 +56,15 @@ List om roles as JSON without OM service ID passed Assert Leader Present in JSON ${output_without_id_passed} ${output_without_id_passed} = Execute And Ignore Error ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --json Should Contain ${output_without_id_passed} no Ozone Manager service ID specified + +List om roles as TABLE with OM service ID passed + ${output_with_id_passed} = Execute ozone admin om roles --service-id=omservice --table + Assert Leader Present in TABLE ${output_with_id_passed} + ${output_with_id_passed} = Execute ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --service-id=omservice --table + Assert Leader Present in TABLE ${output_with_id_passed} + +List om roles as TABLE without OM service ID passed + ${output_without_id_passed} = Execute ozone admin om roles --table + Assert Leader Present in TABLE ${output_without_id_passed} + ${output_without_id_passed} = Execute And Ignore Error ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --table + Should Contain ${output_without_id_passed} no Ozone Manager service ID specified \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot index 8d10cc81e90..b813c9ed411 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/freon.robot @@ -67,3 +67,9 @@ Freon OMBR [arguments] ${prefix}=ombg ${n}=1 ${threads}=1 ${args}=${EMPTY} ${result} = Execute ozone freon ombr ${OM_HA_PARAM} -t ${threads} -n${n} -p ${prefix} ${args} Should contain ${result} Successful executions: ${n} + +Freon DFSG + [arguments] ${prefix}=dfsg ${n}=1000 ${path}={EMPTY} ${threads}=1 ${sync}=HSYNC ${buffer}=1024 ${copy-buffer}=1024 ${size}=10240 ${args}=${EMPTY} + ${result} = Execute ozone freon dfsg -n ${n} --sync ${sync} -s ${size} --path ${path} --buffer ${buffer} --copy-buffer ${copy-buffer} -p ${prefix} -t ${threads} ${args} + Should contain ${result} Successful executions: ${n} + diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot index 2b54e8bf330..d5762f912e7 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot @@ -28,10 +28,10 @@ Bucket Exists [Return] ${TRUE} Compare Key With Local File - [arguments] ${key} ${file} + [arguments] ${key} ${file} ${cmd}=sh key get ${postfix} = Generate Random String 5 [NUMBERS] ${tmpfile} = Set Variable /tmp/tempkey-${postfix} - Execute ozone sh key get ${key} ${tmpfile} + Execute ozone ${cmd} ${key} ${tmpfile} ${rc} = Run And Return Rc diff -q ${file} ${tmpfile} Execute rm -f ${tmpfile} ${result} = Set Variable If ${rc} == 0 ${TRUE} ${FALSE} @@ -42,6 +42,11 @@ Key Should 
Match Local File ${matches} = Compare Key With Local File ${key} ${file} Should Be True ${matches} +File Should Match Local File + [arguments] ${key} ${file} + ${matches} = Compare Key With Local File ${key} ${file} fs -get + Should Be True ${matches} + Verify ACL [arguments] ${object_type} ${object} ${type} ${name} ${acls} ${actual_acls} = Execute ozone sh ${object_type} getacl ${object} | jq -r '.[] | select(.type == "${type}") | select(.name == "${name}") | .aclList[]' | xargs @@ -70,6 +75,11 @@ Create Key Should not contain ${output} Failed Log Uploaded ${file} to ${key} +Assert Unsupported + [arguments] ${cmd} + ${result} = Execute and checkrc ${cmd} 255 + Should Contain ${result} NOT_SUPPORTED_OPERATION + Verify Bucket Empty Replication Config [arguments] ${bucket} ${result} = Execute ozone sh bucket info ${bucket} | jq -r '.replicationConfig' diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot index 22805efcb1b..651cda016f2 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell_tests.robot @@ -56,3 +56,11 @@ Compare Key With Local File with Different File Compare Key With Local File if File Does Not Exist ${matches} = Compare Key With Local File o3://${OM_SERVICE_ID}/vol1/bucket/passwd /no-such-file Should Be Equal ${matches} ${FALSE} + +Rejects Put Key With Zero Expected Generation + ${output} = Execute and checkrc ozone sh key put --expectedGeneration 0 o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd 255 + Should Contain ${output} must be positive + +Rejects Put Key With Negative Expected Generation + ${output} = Execute and checkrc ozone sh key put --expectedGeneration -1 o3://${OM_SERVICE_ID}/vol1/bucket/passwd /etc/passwd 255 + Should Contain ${output} must be positive diff --git a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot index 43860b75d99..883e93bdc5c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot +++ b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot @@ -28,6 +28,8 @@ ${API_ENDPOINT_URL} ${ENDPOINT_URL}/api/v1 ${ADMIN_API_ENDPOINT_URL} ${API_ENDPOINT_URL}/containers ${UNHEALTHY_ENDPOINT_URL} ${API_ENDPOINT_URL}/containers/unhealthy ${NON_ADMIN_API_ENDPOINT_URL} ${API_ENDPOINT_URL}/clusterState +${VOLUME} vol1 +${BUCKET} bucket1 *** Keywords *** Check if Recon picks up container from OM @@ -57,6 +59,15 @@ Check http return code Should contain ${result} 200 END +Check if the listKeys api responds OK + [Arguments] ${volume} ${bucket} + Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit as ozone admin + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/keys/listKeys?startPrefix=/${volume}/${bucket}&limit=1000 + Should contain ${result} "OK" + Should contain ${result} "keys" + Should contain ${result} "${volume}" + Should contain ${result} "${bucket}" + *** Test Cases *** Check if Recon picks up OM data Execute ozone sh volume create recon @@ -67,6 +78,7 @@ Check if Recon picks up OM data Execute ozone sh bucket create recon/api --layout=LEGACY Freon OCKG n=10 args=-s 1025 -v recon -b api Wait Until Keyword Succeeds 90sec 10sec Check if Recon picks up container from OM + Wait Until Keyword Succeeds 90sec 10sec Check if the listKeys api responds OK recon api Check if Recon picks up DN heartbeats ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/datanodes 
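The shell.robot additions above introduce two small helpers: File Should Match Local File, which round-trips a path through ozone fs -get and diffs it against a local file, and Assert Unsupported, which runs a command, expects exit code 255, and checks for NOT_SUPPORTED_OPERATION. A hedged usage sketch follows; the vol1/bucket1 paths are placeholders assumed to exist, and the Assert Unsupported line assumes a client that cannot read an EC bucket, as in the removed backward-compat tests.

# Sketch only: exercising the new shell.robot helpers with placeholder paths
Example use of new shell helpers
    Execute                         ozone fs -put /etc/passwd ofs://${OM_SERVICE_ID}/vol1/bucket1/passwd
    File Should Match Local File    ofs://${OM_SERVICE_ID}/vol1/bucket1/passwd    /etc/passwd
    Assert Unsupported              ozone sh key get -f /vol1/ecbucket/3mb /dev/null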
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index dd06d55f75f..e630fe6cdae 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -20,6 +20,7 @@ Library String Library DateTime Resource ../commonlib.robot Resource commonawslib.robot +Resource mpu_lib.robot Test Timeout 5 minutes Suite Setup Setup Multipart Tests Suite Teardown Teardown Multipart Tests @@ -61,17 +62,8 @@ Test Multipart Upload With Adjusted Length Verify Multipart Upload ${BUCKET} multipart/adjusted_length_${PREFIX} /tmp/part1 /tmp/part2 Test Multipart Upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId -# initiate again - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - ${nextUploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey + ${nextUploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey Should Not Be Equal ${uploadID} ${nextUploadID} # upload part @@ -79,41 +71,24 @@ Test Multipart Upload # upload we get error entity too small. So, considering further complete # multipart upload, uploading each part as 5MB file, exception is for last part - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID} - Should contain ${result} ETag -# override part - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID} - Should contain ${result} ETag + Upload MPU part ${BUCKET} ${PREFIX}/multipartKey ${nextUploadID} 1 /tmp/part1 + Upload MPU part ${BUCKET} ${PREFIX}/multipartKey ${nextUploadID} 1 /tmp/part1 Test Multipart Upload Complete - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true" --tagging="tag-key1=tag-value1&tag-key2=tag-value2" - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey1 0 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true" --tagging="tag-key1=tag-value1&tag-key2=tag-value2" -#upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - ${part1Md5Sum} = Execute md5sum /tmp/part1 | awk '{print $1}' - Should Be Equal As Strings ${eTag1} ${part1Md5Sum} - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and 
checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - ${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}' - Should Be Equal As Strings ${eTag2} ${part2Md5Sum} + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} 2 /tmp/part2 + +#complete multipart upload without any parts + ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 255 + Should contain ${result} InvalidRequest + Should contain ${result} must specify at least one part #complete multipart upload - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey1 - ${resultETag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + ${resultETag} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} ${expectedResultETag} = Execute echo -n ${eTag1}${eTag2} | md5sum | awk '{print $1}' - Should contain ${result} ETag Should Be Equal As Strings ${resultETag} "${expectedResultETag}-2" #check whether the user defined metadata and parts count can be retrieved @@ -158,116 +133,69 @@ Test Multipart Upload Complete Test Multipart Upload with user defined metadata size larger than 2 KB ${custom_metadata_value} = Generate Random String 3000 - ${result} = Execute AWSS3APICli and checkrc create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/mpuWithLargeMetadata --metadata="custom-key1=${custom_metadata_value}" 255 + ${result} = Initiate MPU ${BUCKET} ${PREFIX}/mpuWithLargeMetadata 255 --metadata="custom-key1=${custom_metadata_value}" Should contain ${result} MetadataTooLarge Should not contain ${result} custom-key1: ${custom_metadata_value} Test Multipart Upload Complete Entity too small - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId - -#upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --part-number 1 --body /tmp/10kb --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --part-number 2 --body /tmp/10kb --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - -#complete multipart upload - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' 255 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey2 + ${parts} = Upload MPU parts ${BUCKET} ${PREFIX}/multipartKey2 ${uploadID} /tmp/10kb /tmp/10kb + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey2 ${uploadID} ${parts} 255 Should contain ${result} EntityTooSmall Test Multipart Upload Complete Invalid part errors and complete mpu 
with few parts - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey3 #complete multipart upload when no parts uploaded - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=2},{ETag=etag2,PartNumber=1}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=2},{ETag=etag2,PartNumber=1} 255 Should contain ${result} InvalidPart #upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 2 --body /tmp/part1 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part3" > /tmp/part3 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 --body /tmp/part3 --upload-id ${uploadID} - ${eTag3} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 2 /tmp/part1 + ${eTag3} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 3 /tmp/part2 #complete multipart upload - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=4},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=4},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPartOrder #complete 
multipart upload(merge with few parts) - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag3},PartNumber=3}]' - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey3 - Should contain ${result} ETag + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag3},PartNumber=3} ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 /tmp/${PREFIX}-multipartKey3.result - Execute cat /tmp/part1 /tmp/part3 > /tmp/${PREFIX}-multipartKey3 + Execute cat /tmp/part1 /tmp/part2 > /tmp/${PREFIX}-multipartKey3 Compare files /tmp/${PREFIX}-multipartKey3 /tmp/${PREFIX}-multipartKey3.result ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 1 /tmp/${PREFIX}-multipartKey3-part1.result Compare files /tmp/part1 /tmp/${PREFIX}-multipartKey3-part1.result - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 /tmp/${PREFIX}-multipartKey3-part3.result - Compare files /tmp/part3 /tmp/${PREFIX}-multipartKey3-part3.result + ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 /tmp/${PREFIX}-multipartKey3-part2.result + Compare files /tmp/part2 /tmp/${PREFIX}-multipartKey3-part2.result Test abort Multipart upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey4 --storage-class REDUCED_REDUNDANCY - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey4 --upload-id ${uploadID} 0 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey4 0 --storage-class REDUCED_REDUNDANCY + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey4 ${uploadID} 0 Test abort Multipart upload with invalid uploadId - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id "random" 255 + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey5 "random" 255 Upload part with Incorrect uploadID - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - Execute echo "Multipart upload" > /tmp/testfile - ${result} = Execute AWSS3APICli and checkrc upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/testfile --upload-id "random" 255 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey + ${result} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey "no-such-upload-id" 1 /tmp/10kb 255 Should contain ${result} NoSuchUpload Test list parts #initiate multipart upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey5 #upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --part-number 1 
--body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 2 /tmp/part2 #list parts ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id ${uploadID} @@ -290,7 +218,7 @@ Test list parts Should contain ${result} STANDARD #finally abort it - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id ${uploadID} 0 + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 0 Test Multipart Upload with the simplified aws s3 cp API Execute AWSS3Cli cp /tmp/22mb s3://${BUCKET}/mpyawscli @@ -302,19 +230,14 @@ Test Multipart Upload Put With Copy ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copytest/source --body /tmp/part1 - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copytest/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId - + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copytest/destination ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key ${PREFIX}/copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/${PREFIX}/copytest/source Should contain ${result} ETag Should contain ${result} LastModified ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copytest/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]' + Complete MPU ${BUCKET} ${PREFIX}/copytest/destination ${uploadID} {ETag=${eTag1},PartNumber=1} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copytest/destination /tmp/part-result Compare files /tmp/part1 /tmp/part-result @@ -323,11 +246,7 @@ Test Multipart Upload Put With Copy and range ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source --body /tmp/10mb - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copyrange/destination ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/${PREFIX}/copyrange/source --copy-source-range bytes=0-10485757 Should contain ${result} ETag @@ -340,7 +259,7 @@ Test Multipart Upload Put With Copy and range ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --multipart-upload 
'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' + Complete MPU ${BUCKET} ${PREFIX}/copyrange/destination ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination /tmp/part-result Compare files /tmp/10mb /tmp/part-result @@ -352,11 +271,7 @@ Test Multipart Upload Put With Copy and range with IfModifiedSince ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source --body /tmp/10mb - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copyrange/destination #calc time-to-sleep from time-last-modified plus a few seconds ${result} = Execute AWSS3APICli head-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source @@ -391,24 +306,14 @@ Test Multipart Upload Put With Copy and range with IfModifiedSince ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' + Complete MPU ${BUCKET} ${PREFIX}/copyrange/destination ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination /tmp/part-result Compare files /tmp/10mb /tmp/part-result Test Multipart Upload list - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/listtest/key1 - ${uploadID1} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/listtest/key1 - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/listtest/key2 - ${uploadID2} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/listtest/key2 - Should contain ${result} UploadId + ${uploadID1} = Initiate MPU ${BUCKET} ${PREFIX}/listtest/key1 + ${uploadID2} = Initiate MPU ${BUCKET} ${PREFIX}/listtest/key2 ${result} = Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix ${PREFIX}/listtest Should contain ${result} ${uploadID1} diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot index 76e0cadf372..39ddbde41b0 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot @@ -42,13 +42,14 @@ Create bucket with invalid bucket name ${result} = Execute AWSS3APICli and checkrc create-bucket --bucket invalid_bucket_${randStr} 255 Should contain ${result} InvalidBucketName -Create new bucket and check no group ACL +Create new bucket and check default group ACL ${bucket} = Create bucket ${acl} = Execute ozone sh bucket getacl s3v/${bucket} ${group} = Get Regexp Matches ${acl} "GROUP" - IF '${group}' is not '[]' + IF '${group}' != '[]' ${json} = Evaluate json.loads('''${acl}''') json # make sure this check is for group acl Should contain ${json}[1][type] GROUP - Should contain ${json}[1][aclList] NONE - END \ No 
newline at end of file + Should contain ${json}[1][aclList] READ + Should contain ${json}[1][aclList] LIST + END diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot index 607a7dee960..a382970a6de 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot @@ -19,6 +19,7 @@ Library OperatingSystem Library String Resource ../commonlib.robot Resource commonawslib.robot +Resource mpu_lib.robot Test Timeout 5 minutes Suite Setup Setup s3 tests @@ -48,17 +49,13 @@ Delete bucket with incomplete multipart uploads [tags] no-bucket-type ${bucket} = Create bucket - # initiate incomplete multipart uploads (multipart upload is initiated but not completed/aborted) - ${initiate_result} = Execute AWSS3APICli create-multipart-upload --bucket ${bucket} --key incomplete-multipartkey - ${uploadID} = Execute echo '${initiate_result}' | jq -r '.UploadId' - Should contain ${initiate_result} ${bucket} - Should contain ${initiate_result} incomplete-multipartkey - Should contain ${initiate_result} UploadId + # initiate incomplete multipart upload (multipart upload is initiated but not completed/aborted) + ${uploadID} = Initiate MPU ${bucket} incomplete-multipartkey # bucket deletion should fail since there is still incomplete multipart upload ${delete_fail_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 255 Should contain ${delete_fail_result} BucketNotEmpty # after aborting the multipart upload, the bucket deletion should succeed - ${abort_result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${bucket} --key incomplete-multipartkey --upload-id ${uploadID} 0 - ${delete_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 0 \ No newline at end of file + ${abort_result} = Abort MPU ${bucket} incomplete-multipartkey ${uploadID} + ${delete_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 0 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index 45dee9270bd..ac64ee36537 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -24,7 +24,7 @@ ${OZONE_S3_HEADER_VERSION} v4 ${OZONE_S3_SET_CREDENTIALS} true ${BUCKET} generated ${BUCKET_LAYOUT} OBJECT_STORE -${KEY_NAME} key1 +${ENCRYPTION_KEY} key1 ${OZONE_S3_TESTS_SET_UP} ${FALSE} ${OZONE_AWS_ACCESS_KEY_ID} ${EMPTY} ${OZONE_S3_ADDRESS_STYLE} path @@ -156,7 +156,7 @@ Create encrypted bucket Return From Keyword if '${SECURITY_ENABLED}' == 'false' ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/encrypted Return From Keyword If ${exists} - Execute ozone sh bucket create -k ${KEY_NAME} --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/encrypted + Execute ozone sh bucket create -k ${ENCRYPTION_KEY} --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/encrypted Create link [arguments] ${bucket} @@ -172,33 +172,6 @@ Generate random prefix ${random} = Generate Ozone String Set Global Variable ${PREFIX} ${random} -Perform Multipart Upload - [arguments] ${bucket} ${key} @{files} - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${bucket} --key ${key} - ${upload_id} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - - @{etags} = Create List - FOR ${i} ${file} IN ENUMERATE @{files} - ${part} = Evaluate ${i} + 1 - ${result} = Execute 
AWSS3APICli upload-part --bucket ${bucket} --key ${key} --part-number ${part} --body ${file} --upload-id ${upload_id} - ${etag} = Execute echo '${result}' | jq -r '.ETag' - Append To List ${etags} {ETag=${etag},PartNumber=${part}} - END - - ${parts} = Catenate SEPARATOR=, @{etags} - Execute AWSS3APICli complete-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} --multipart-upload 'Parts=[${parts}]' - -Verify Multipart Upload - [arguments] ${bucket} ${key} @{files} - - ${random} = Generate Ozone String - - Execute AWSS3APICli get-object --bucket ${bucket} --key ${key} /tmp/verify${random} - ${tmp} = Catenate @{files} - Execute cat ${tmp} > /tmp/original${random} - Compare files /tmp/original${random} /tmp/verify${random} - Revoke S3 secrets Execute and Ignore Error ozone s3 revokesecret -y Execute and Ignore Error ozone s3 revokesecret -y -u testuser diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot new file mode 100644 index 00000000000..0aaa0affec1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot @@ -0,0 +1,105 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Keywords for Multipart Upload +Library OperatingSystem +Library String +Resource commonawslib.robot + +*** Keywords *** + +Initiate MPU + [arguments] ${bucket} ${key} ${expected_rc}=0 ${opts}=${EMPTY} + + ${result} = Execute AWSS3APICli and checkrc create-multipart-upload --bucket ${bucket} --key ${key} ${opts} ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ${bucket} + Should contain ${result} ${key} + ${upload_id} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 + RETURN ${upload_id} + ELSE + RETURN ${result} + END + + +Upload MPU part + [arguments] ${bucket} ${key} ${upload_id} ${part} ${file} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc upload-part --bucket ${bucket} --key ${key} --part-number ${part} --body ${file} --upload-id ${upload_id} ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ETag + ${etag} = Execute echo '${result}' | jq -r '.ETag' + ${md5sum} = Execute md5sum ${file} | awk '{print $1}' + Should Be Equal As Strings ${etag} ${md5sum} + RETURN ${etag} + ELSE + RETURN ${result} + END + + +Complete MPU + [arguments] ${bucket} ${key} ${upload_id} ${parts} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} --multipart-upload 'Parts=[${parts}]' ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ${bucket} + Should contain ${result} ${key} + Should contain ${result} ETag + ${etag} = Execute echo '${result}' | jq -r '.ETag' + RETURN ${etag} + ELSE + RETURN ${result} + END + + +Abort MPU + [arguments] ${bucket} ${key} ${upload_id} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} ${expected_rc} + + +Upload MPU parts + [arguments] ${bucket} ${key} ${upload_id} @{files} + + @{etags} = Create List + FOR ${i} ${file} IN ENUMERATE @{files} + ${part} = Evaluate ${i} + 1 + ${etag} = Upload MPU part ${bucket} ${key} ${upload_id} ${part} ${file} + Append To List ${etags} {ETag=${etag},PartNumber=${part}} + END + ${parts} = Catenate SEPARATOR=, @{etags} + + RETURN ${parts} + + +Perform Multipart Upload + [arguments] ${bucket} ${key} @{files} + + ${upload_id} = Initiate MPU ${bucket} ${key} + ${parts} = Upload MPU parts ${bucket} ${key} ${upload_id} @{files} + Complete MPU ${bucket} ${key} ${upload_id} ${parts} + + +Verify Multipart Upload + [arguments] ${bucket} ${key} @{files} + + ${random} = Generate Ozone String + + Execute AWSS3APICli get-object --bucket ${bucket} --key ${key} /tmp/verify${random} + ${tmp} = Catenate @{files} + Execute cat ${tmp} > /tmp/original${random} + Compare files /tmp/original${random} /tmp/verify${random} + diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopys3a.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopys3a.robot index fead57ca31c..96fd1e62fc8 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopys3a.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopys3a.robot @@ -31,19 +31,19 @@ Put object s3a simulation Execute echo "Randomtext" > /tmp/testfile ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt 255 ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/word.txt/ - Should Not contain ${result} word.txt + Should Not Match Regexp ${result} "Key".*word.txt ${result} = Execute AWSS3APICli and checkrc head-object --bucket 
${BUCKET} --key ${PREFIX}/word.txt._COPYING_ 255 ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/word.txt._COPYING_/ - Should Not contain ${result} word.txt._COPYING_ + Should Not Match Regexp ${result} "Key".*word.txt._COPYING_ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ --body /tmp/testfile Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ 0 Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt 255 ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/word.txt/ - Should Not contain ${result} word.txt._COPYING_ + Should Not Match Regexp ${result} "Key".*word.txt._COPYING_ ${result} = Execute AWSS3ApiCli copy-object --bucket ${BUCKET} --key ${PREFIX}/word.txt --copy-source ${BUCKET}/${PREFIX}/word.txt._COPYING_ Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt 0 Execute AWSS3APICli delete-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ - Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ 255 \ No newline at end of file + Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ 255 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot index 12fb985348a..82a985f1d50 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot @@ -44,6 +44,7 @@ Put object to s3 Get object from s3 ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile.result Compare files /tmp/testfile /tmp/testfile.result + Should not contain ${result} TagCount ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte /tmp/zerobyte.result Compare files /tmp/zerobyte /tmp/zerobyte.result diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot new file mode 100644 index 00000000000..9098673680d --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation S3 gateway test with aws cli +Library OperatingSystem +Library String +Resource ../commonlib.robot +Resource commonawslib.robot +Test Timeout 5 minutes +Suite Setup Setup s3 tests + +*** Variables *** +${ENDPOINT_URL} http://s3g:9878 +${OZONE_TEST} true +${BUCKET} generated + + +*** Test Cases *** + +Put object tagging +# Create an object and call put-object-tagging + Execute echo "Randomtext" > /tmp/testfile + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --body /tmp/testfile + ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/putobject/key=value/ + Should contain ${result} f1 + + ${result} = Execute AWSS3ApiCli put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --tagging '{"TagSet": [{ "Key": "tag-key1", "Value": "tag-value1" }]}' + ${result} = Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile2.result + Should contain ${result} TagCount + ${tagCount} = Execute and checkrc echo '${result}' | jq -r '.TagCount' 0 + Should Be Equal ${tagCount} 1 + +# Calling put-object-tagging again to overwrite the existing tags + ${result} = Execute AWSS3ApiCli put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --tagging '{"TagSet": [{ "Key": "tag-key2", "Value": "tag-value2" },{ "Key": "tag-key3", "Value": "tag-value3" }]}' + ${result} = Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile2.result + Should contain ${result} TagCount + ${tagCount} = Execute and checkrc echo '${result}' | jq -r '.TagCount' 0 + Should Be Equal ${tagCount} 2 + +# Calling put-object-tagging on non-existent key + ${result} = Execute AWSS3APICli and checkrc put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/nonexistent --tagging '{"TagSet": [{ "Key": "tag-key1", "Value": "tag-value1" }]}' 255 + Should contain ${result} NoSuchKey + +#This test depends on the previous test case. Can't be executed alone +Get object tagging + + ${result} = Execute AWSS3ApiCli get-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + Should contain ${result} TagSet + ${tagCount} = Execute and checkrc echo '${result}' | jq '.TagSet | length' 0 + Should Be Equal ${tagCount} 2 + + +#This test depends on the previous test case. 
Can't be executed alone +Delete object tagging + + ${result} = Execute AWSS3ApiCli delete-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + ${result} = Execute AWSS3ApiCli get-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + Should contain ${result} TagSet + ${tagCount} = Execute and checkrc echo '${result}' | jq '.TagSet | length' 0 + Should Be Equal ${tagCount} 0 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh index b9a4c68587d..ab2807167d0 100755 --- a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh +++ b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh @@ -83,5 +83,6 @@ run_robot_test objectcopy run_robot_test objectmultidelete run_robot_test objecthead run_robot_test MultipartUpload +run_robot_test objecttagging rebot --outputdir results/ results/*.xml diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index e9b5dd5df72..e0c2fc7f818 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -45,15 +45,19 @@ S3 Gateway Secret Already Exists Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True S3 Gateway Generate Secret By Username - [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser Should contain ${result} HTTP/1.1 200 OK ignore_case=True Should Match Regexp ${result} .*.* S3 Gateway Generate Secret By Username For Other User - [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 Should contain ${result} HTTP/1.1 200 OK ignore_case=True Should Match Regexp ${result} .*.* + +S3 Gateway Reject Secret Generation By Non-admin User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 403 FORBIDDEN ignore_case=True \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot index 59725c0416c..ffb03a85a8a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot @@ -38,15 +38,19 @@ S3 Gateway Revoke Secret Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username - [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username For Other User - [Tags] robot:skip # TODO: Enable after HDDS-11041 is done.
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 Should contain ${result} HTTP/1.1 200 OK ignore_case=True + +S3 Gateway Reject Secret Revoke By Non-admin User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 403 FORBIDDEN ignore_case=True \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot b/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot deleted file mode 100644 index a78f94e5fa9..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Test for bucket encryption -Library BuiltIn -Library String -Resource ../commonlib.robot -Resource ../lib/os.robot -Resource ../ozone-lib/shell.robot -Suite Setup Setup Test -Test Timeout 5 minutes - -*** Variables *** -${KEY_NAME} key1 -${VOLUME} - -*** Keywords *** -Setup Test - ${volume} = Create Random Volume - Set Suite Variable ${VOLUME} ${volume} - - -*** Test Cases *** -Create Encrypted Bucket - ${output} = Execute ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket - Should Not Contain ${output} INVALID_REQUEST - Bucket Exists o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket - -Create Key in Encrypted Bucket - ${key} = Set Variable o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket/passwd - ${output} = Execute ozone sh key put ${key} /etc/passwd - Key Should Match Local File ${key} /etc/passwd diff --git a/hadoop-ozone/dist/src/shell/conf/log4j.properties b/hadoop-ozone/dist/src/shell/conf/log4j.properties index 96e90ab5417..aa3d0b4bf43 100644 --- a/hadoop-ozone/dist/src/shell/conf/log4j.properties +++ b/hadoop-ozone/dist/src/shell/conf/log4j.properties @@ -20,7 +20,7 @@ hadoop.log.dir=. hadoop.log.file=hadoop.log # Define the root logger to the system property "hadoop.root.logger". -log4j.rootLogger=${hadoop.root.logger}, EventCounter +log4j.rootLogger=${hadoop.root.logger} # Logging Threshold log4j.threshold=ALL @@ -129,13 +129,6 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR #log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN -# -# Event Counter Appender -# Sends counts of logging messages at different severity levels to Hadoop Metrics. 
-# -log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter - - # Log levels of third-party libraries log4j.logger.org.apache.commons.beanutils=WARN diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 22ceed9ed3c..0d005b3bd78 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -61,6 +61,7 @@ function ozone_usage ozone_add_subcommand "debug" client "Ozone debug tool" ozone_add_subcommand "repair" client "Ozone repair tool" ozone_add_subcommand "checknative" client "checks if native libraries are loaded" + ozone_add_subcommand "ratis" client "Ozone ratis tool" ozone_generate_usage "${OZONE_SHELL_EXECNAME}" false } @@ -231,6 +232,10 @@ function ozonecmd_case OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.checknative.CheckNative OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; + ratis) + OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.OzoneRatis + OZONE_RUN_ARTIFACT_NAME="ozone-tools" + ;; *) OZONE_CLASSNAME="${subcmd}" if ! ozone_validate_classname "${OZONE_CLASSNAME}"; then diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh b/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh index 84e2b73836e..0e357ddbe15 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh +++ b/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh @@ -2817,14 +2817,6 @@ function ozone_assemble_classpath() { done ozone_add_classpath "${OZONE_HOME}/share/ozone/web" - #We need to add the artifact manually as it's not part the generated classpath desciptor - local MAIN_ARTIFACT - MAIN_ARTIFACT=$(find "$HDDS_LIB_JARS_DIR" -name "${OZONE_RUN_ARTIFACT_NAME}-*.jar") - if [[ -z "$MAIN_ARTIFACT" ]] || [[ ! -e "$MAIN_ARTIFACT" ]]; then - echo "ERROR: Component jar file $MAIN_ARTIFACT is missing from ${HDDS_LIB_JARS_DIR}" - fi - ozone_add_classpath "${MAIN_ARTIFACT}" - #Add optional jars to the classpath local OPTIONAL_CLASSPATH_DIR OPTIONAL_CLASSPATH_DIR="${HDDS_LIB_JARS_DIR}/${OZONE_RUN_ARTIFACT_NAME}" diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index 4548459105f..90961941a46 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -20,9 +20,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-fault-injection-test org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Mini Ozone Chaos Tests Apache Ozone Mini Ozone Chaos Tests diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml index 97d10cbf761..35874911730 100644 --- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/network-tests/pom.xml @@ -20,7 +20,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone-fault-injection-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-network-tests Apache Ozone Network Tests diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 432faab4877..e62f7e47dc0 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-fault-injection-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Fault Injection Tests Apache Ozone Fault Injection Tests pom diff --git 
a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index 7664643b153..bacc730a00f 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -22,16 +22,17 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-httpfsgateway - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT jar Apache Ozone HttpFS Apache Ozone HttpFS + false REPO NOT AVAIL REVISION NOT AVAIL yyyy-MM-dd'T'HH:mm:ssZ diff --git a/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties b/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties index 164896e1f05..16d13de384a 100644 --- a/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties +++ b/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties @@ -16,6 +16,3 @@ httpfs.version=${project.version} httpfs.source.repository=${httpfs.source.repository} httpfs.source.revision=${httpfs.source.revision} - -httpfs.build.username=${user.name} -httpfs.build.timestamp=${httpfs.build.timestamp} diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index bcfb1660244..fa3862a7f71 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -20,14 +20,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-insight - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Insight Tool Apache Ozone Insight Tool jar + + false diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java index b4080796be2..690783ee411 100644 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java +++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java @@ -34,10 +34,6 @@ mixinStandardHelpOptions = true) public class Insight extends GenericCli { - public Insight() { - super(Insight.class); - } - public static void main(String[] args) throws Exception { new Insight().run(args); } diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index f66f64d2874..f4a2f713185 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-integration-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Integration Tests Apache Ozone Integration Tests jar @@ -141,6 +141,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + @@ -161,6 +165,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + @@ -216,6 +224,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + @@ -251,6 +263,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j jul-to-slf4j + + org.assertj + assertj-core + ${assertj.version} + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index 78b67f99f1e..97306475188 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -117,6 +117,7 @@ import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; import static org.assertj.core.api.Assertions.assertThat; @@ -184,9 +185,11 @@ void init() throws Exception { conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2); - + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); conf.setBoolean(OZONE_ACL_ENABLED, true); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); if (!bucketLayout.equals(FILE_SYSTEM_OPTIMIZED)) { @@ -407,7 +410,7 @@ public void testCreateWithInvalidPaths() throws Exception { } private void checkInvalidPath(Path path) { - InvalidPathException pathException = assertThrows( + InvalidPathException pathException = GenericTestUtils.assertThrows( InvalidPathException.class, () -> fs.create(path, false) ); assertThat(pathException.getMessage()).contains("Invalid path Name"); @@ -1829,12 +1832,14 @@ public void testLoopInLinkBuckets() throws Exception { String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, linkBucket1Name, linksVolume); - try { - FileSystem.get(URI.create(rootPath), cluster.getConf()); - fail("Should throw Exception due to loop in Link Buckets"); + try (FileSystem fileSystem = FileSystem.get(URI.create(rootPath), + cluster.getConf())) { + fail("Should throw Exception due to loop in Link Buckets" + + " while initialising fs with URI " + fileSystem.getUri()); } catch (OMException oe) { // Expected exception - assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, oe.getResult()); + assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, + oe.getResult()); } finally { volume.deleteBucket(linkBucket1Name); volume.deleteBucket(linkBucket2Name); @@ -1852,13 +1857,17 @@ public void testLoopInLinkBuckets() throws Exception { String rootPath2 = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, danglingLinkBucketName, linksVolume); + FileSystem fileSystem = null; try { - FileSystem.get(URI.create(rootPath2), cluster.getConf()); + fileSystem = FileSystem.get(URI.create(rootPath2), cluster.getConf()); } catch (OMException oe) { // Expected exception fail("Should not throw Exception and show orphan buckets"); } finally { volume.deleteBucket(danglingLinkBucketName); + if (fileSystem != null) { + fileSystem.close(); + } } } @@ -2085,8 +2094,8 @@ void testListStatus2() throws IOException { final long initialListStatusCount = omMetrics.getNumListStatus(); FileStatus[] statusList = fs.listStatus(createPath("/")); assertEquals(1, statusList.length); - assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 1); - assertEquals(initialListStatusCount + 1, omMetrics.getNumListStatus()); + assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2); + 
assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus()); assertEquals(fs.getFileStatus(path), statusList[0]); dirPath = RandomStringUtils.randomAlphanumeric(5); @@ -2097,13 +2106,48 @@ void testListStatus2() throws IOException { statusList = fs.listStatus(createPath("/")); assertEquals(2, statusList.length); - assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2); - assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus()); + assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 4); + assertEquals(initialListStatusCount + 4, omMetrics.getNumListStatus()); for (Path p : paths) { assertThat(Arrays.asList(statusList)).contains(fs.getFileStatus(p)); } } + @Test + public void testOzoneManagerListLocatedStatusAndListStatus() throws IOException { + String data = RandomStringUtils.randomAlphanumeric(20); + String directory = RandomStringUtils.randomAlphanumeric(5); + String filePath = RandomStringUtils.randomAlphanumeric(5); + Path path = createPath("/" + directory + "/" + filePath); + try (FSDataOutputStream stream = fs.create(path)) { + stream.writeBytes(data); + } + RemoteIterator listLocatedStatus = fs.listLocatedStatus(path); + int count = 0; + while (listLocatedStatus.hasNext()) { + LocatedFileStatus locatedFileStatus = listLocatedStatus.next(); + assertTrue(locatedFileStatus.getBlockLocations().length >= 1); + + for (BlockLocation blockLocation : locatedFileStatus.getBlockLocations()) { + assertTrue(blockLocation.getNames().length >= 1); + assertTrue(blockLocation.getHosts().length >= 1); + } + count++; + } + assertEquals(1, count); + count = 0; + RemoteIterator listStatus = fs.listStatusIterator(path); + while (listStatus.hasNext()) { + FileStatus fileStatus = listStatus.next(); + assertFalse(fileStatus instanceof LocatedFileStatus); + count++; + } + assertEquals(1, count); + FileStatus[] fileStatuses = fs.listStatus(path.getParent()); + assertEquals(1, fileStatuses.length); + assertFalse(fileStatuses[0] instanceof LocatedFileStatus); + } + @Test void testOzoneManagerFileSystemInterface() throws IOException { String dirPath = RandomStringUtils.randomAlphanumeric(5); @@ -2228,7 +2272,8 @@ void testFileSystemWithObjectStoreLayout() throws IOException { OzoneConfiguration config = new OzoneConfiguration(fs.getConf()); config.set(FS_DEFAULT_NAME_KEY, obsRootPath); - IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> FileSystem.get(config)); + IllegalArgumentException e = GenericTestUtils.assertThrows(IllegalArgumentException.class, + () -> FileSystem.get(config)); assertThat(e.getMessage()).contains("OBJECT_STORE, which does not support file system semantics"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index 32a785a95a9..2251b105817 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -237,6 +237,8 @@ void initClusterAndEnv() throws IOException, InterruptedException, TimeoutExcept conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); + 
conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); if (bucketLayout == BucketLayout.FILE_SYSTEM_OPTIMIZED) { @@ -1069,7 +1071,7 @@ private void listStatusRecursiveHelper(Path curPath, List result) private List callAdapterListStatus(String pathStr, boolean recursive, String startPath, long numEntries) throws IOException { return adapter.listStatus(pathStr, recursive, startPath, numEntries, - ofs.getUri(), ofs.getWorkingDirectory(), ofs.getUsername()) + ofs.getUri(), ofs.getWorkingDirectory(), ofs.getUsername(), false) .stream().map(ofs::convertFileStatus).collect(Collectors.toList()); } @@ -1205,7 +1207,7 @@ void testSharedTmpDir() throws IOException { ClientProtocol proxy = objectStore.getClientProxy(); // Get default acl rights for user OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access // ACL admin owner, world read+write EnumSet aclRights = EnumSet.of(READ, WRITE); @@ -1308,7 +1310,7 @@ void testTempMount() throws IOException { ClientProtocol proxy = objectStore.getClientProxy(); // Get default acl rights for user OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, userRights); @@ -2309,7 +2311,7 @@ void testNonPrivilegedUserMkdirCreateBucket() throws IOException { // Get default acl rights for user OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, userRights); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java index 47c584e048a..67baea88357 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java @@ -65,15 +65,17 @@ public static void listStatusIteratorOnPageSize(OzoneConfiguration conf, URI uri = FileSystem.getDefaultUri(config); config.setBoolean( String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); - FileSystem subject = FileSystem.get(uri, config); - Path dir = new Path(Objects.requireNonNull(rootPath), "listStatusIterator"); - try { - Set paths = new TreeSet<>(); - for (int dirCount : dirCounts) { - listStatusIterator(subject, dir, paths, dirCount); + try (FileSystem subject = FileSystem.get(uri, config)) { + Path dir = new Path(Objects.requireNonNull(rootPath), + "listStatusIterator"); + try { + Set paths = new TreeSet<>(); + for (int dirCount : dirCounts) { + listStatusIterator(subject, dir, paths, dirCount); + } + } finally { + subject.delete(dir, true); } - } finally { - subject.delete(dir, true); } } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index 0abfb133654..78fb4c66fc1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -18,7 +18,11 @@ package org.apache.hadoop.fs.ozone; +import java.util.List; +import java.util.Random; import java.util.concurrent.CompletableFuture; + +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -32,10 +36,16 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; @@ -48,12 +58,16 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,6 +78,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -72,6 +88,12 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.when; /** * Directory deletion service test cases. 
@@ -97,6 +119,7 @@ public static void init() throws Exception { conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); conf.setBoolean(OZONE_ACL_ENABLED, true); cluster = MiniOzoneCluster.newBuilder(conf) @@ -460,6 +483,123 @@ public void testDeleteFilesAndSubFiles() throws Exception { assertEquals(prevDeletedKeyCount + 5, currentDeletedKeyCount); } + private void createFileKey(OzoneBucket bucket, String key) + throws Exception { + byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8); + OzoneOutputStream fileKey = bucket.createKey(key, value.length); + fileKey.write(value); + fileKey.close(); + } + + /* + * Create key d1/k1 + * Create snap1 + * Rename dir1 to dir2 + * Delete dir2 + * Wait for KeyDeletingService to start processing deleted key k2 + * Create snap2 by making the KeyDeletingService thread wait till snap2 is flushed + * Resume KeyDeletingService thread. + * Read d1 from snap1. + */ + @Test + public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() + throws Exception { + OMMetadataManager omMetadataManager = cluster.getOzoneManager().getMetadataManager(); + Table snapshotInfoTable = omMetadataManager.getSnapshotInfoTable(); + Table deletedDirTable = omMetadataManager.getDeletedDirTable(); + Table renameTable = omMetadataManager.getSnapshotRenamedTable(); + cluster.getOzoneManager().getKeyManager().getSnapshotDeletingService().shutdown(); + DirectoryDeletingService dirDeletingService = cluster.getOzoneManager().getKeyManager().getDirDeletingService(); + // Suspend KeyDeletingService + dirDeletingService.suspend(); + GenericTestUtils.waitFor(() -> !dirDeletingService.isRunningOnAOS(), 1000, 10000); + Random random = new Random(); + final String testVolumeName = "volume" + random.nextInt(); + final String testBucketName = "bucket" + random.nextInt(); + // Create Volume and Buckets + ObjectStore store = client.getObjectStore(); + store.createVolume(testVolumeName); + OzoneVolume volume = store.getVolume(testVolumeName); + volume.createBucket(testBucketName, + BucketArgs.newBuilder().setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build()); + OzoneBucket bucket = volume.getBucket(testBucketName); + + OzoneManager ozoneManager = Mockito.spy(cluster.getOzoneManager()); + OmSnapshotManager omSnapshotManager = Mockito.spy(ozoneManager.getOmSnapshotManager()); + when(ozoneManager.getOmSnapshotManager()).thenAnswer(i -> omSnapshotManager); + DirectoryDeletingService service = Mockito.spy(new DirectoryDeletingService(1000, TimeUnit.MILLISECONDS, 1000, + ozoneManager, + cluster.getConf(), 1)); + service.shutdown(); + final int initialSnapshotCount = + (int) cluster.getOzoneManager().getMetadataManager().countRowsInTable(snapshotInfoTable); + final int initialDeletedCount = (int) omMetadataManager.countRowsInTable(deletedDirTable); + final int initialRenameCount = (int) omMetadataManager.countRowsInTable(renameTable); + String snap1 = "snap1"; + String snap2 = "snap2"; + createFileKey(bucket, "dir1/key1"); + store.createSnapshot(testVolumeName, testBucketName, "snap1"); + bucket.renameKey("dir1", "dir2"); + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(testVolumeName) + .setBucketName(testBucketName) + .setKeyName("dir2").build(); + long objectId = 
store.getClientProxy().getOzoneManagerClient().getKeyInfo(omKeyArgs, false) + .getKeyInfo().getObjectID(); + long volumeId = omMetadataManager.getVolumeId(testVolumeName); + long bucketId = omMetadataManager.getBucketId(testVolumeName, testBucketName); + String deletePathKey = omMetadataManager.getOzoneDeletePathKey(objectId, + omMetadataManager.getOzonePathKey(volumeId, + bucketId, bucketId, "dir2")); + bucket.deleteDirectory("dir2", true); + + + assertTableRowCount(deletedDirTable, initialDeletedCount + 1); + assertTableRowCount(renameTable, initialRenameCount + 1); + Mockito.doAnswer(i -> { + List purgePathRequestList = i.getArgument(5); + for (OzoneManagerProtocolProtos.PurgePathRequest purgeRequest : purgePathRequestList) { + Assertions.assertNotEquals(deletePathKey, purgeRequest.getDeletedDir()); + } + return i.callRealMethod(); + }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(), anyLong(), + anyLong(), anyList(), anyList(), eq(null), anyLong(), anyInt(), Mockito.any(), any(), anyLong()); + + Mockito.doAnswer(i -> { + store.createSnapshot(testVolumeName, testBucketName, snap2); + GenericTestUtils.waitFor(() -> { + try { + SnapshotInfo snapshotInfo = store.getClientProxy().getOzoneManagerClient() + .getSnapshotInfo(testVolumeName, testBucketName, snap2); + + return OmSnapshotManager.areSnapshotChangesFlushedToDB(cluster.getOzoneManager().getMetadataManager(), + snapshotInfo); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 100000); + GenericTestUtils.waitFor(() -> { + try { + return renameTable.get(omMetadataManager.getRenameKey(testVolumeName, testBucketName, objectId)) == null; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 10000); + return i.callRealMethod(); + }).when(omSnapshotManager).getSnapshot(ArgumentMatchers.eq(testVolumeName), ArgumentMatchers.eq(testBucketName), + ArgumentMatchers.eq(snap1)); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1); + service.runPeriodicalTaskNow(); + service.runPeriodicalTaskNow(); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2); + store.deleteSnapshot(testVolumeName, testBucketName, snap2); + service.runPeriodicalTaskNow(); + store.deleteSnapshot(testVolumeName, testBucketName, snap1); + cluster.restartOzoneManager(); + assertTableRowCount(cluster.getOzoneManager().getMetadataManager().getSnapshotInfoTable(), initialSnapshotCount); + dirDeletingService.resume(); + } + @Test public void testDirDeletedTableCleanUpForSnapshot() throws Exception { Table deletedDirTable = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index 49b515d53c5..f185addf6b8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -70,6 +70,7 @@ import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -83,7 +84,10 @@ import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.common.ChecksumCache; 
+import org.apache.hadoop.ozone.container.TestHelper; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import org.apache.hadoop.ozone.container.keyvalue.impl.AbstractTestChunkManager; import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -93,6 +97,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.service.OpenKeyCleanupService; import org.apache.hadoop.security.UserGroupInformation; @@ -168,6 +173,7 @@ public class TestHSync { private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE; private static final int SERVICE_INTERVAL = 100; private static final int EXPIRE_THRESHOLD_MS = 140; + private static final int WAL_HEADER_LEN = 83; private static OpenKeyCleanupService openKeyCleanupService; @@ -177,6 +183,8 @@ public static void init() throws Exception { CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); + CONF.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + CONF.setBoolean("ozone.client.hbase.enhancements.allowed", true); CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); // Reduce KeyDeletingService interval @@ -219,8 +227,8 @@ public static void init() throws Exception { GenericTestUtils.setLogLevel(BlockOutputStream.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(KeyValueHandler.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(BufferPool.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(ChecksumCache.LOG, Level.DEBUG); openKeyCleanupService = (OpenKeyCleanupService) cluster.getOzoneManager().getKeyManager().getOpenKeyCleanupService(); @@ -343,6 +351,8 @@ public void testEmptyHsync() throws Exception { } @Test + // Making this the second test to be run to avoid lingering block files from previous tests + @Order(2) public void testKeyHSyncThenClose() throws Exception { // Check that deletedTable should not have keys with the same block as in // keyTable's when a key is hsync()'ed then close()'d. @@ -358,10 +368,16 @@ public void testKeyHSyncThenClose() throws Exception { String data = "random data"; final Path file = new Path(dir, "file-hsync-then-close"); try (FileSystem fs = FileSystem.get(CONF)) { + String chunkPath; try (FSDataOutputStream outputStream = fs.create(file, true)) { outputStream.write(data.getBytes(UTF_8), 0, data.length()); outputStream.hsync(); + // locate the container chunk path on the first DataNode. + chunkPath = getChunkPathOnDataNode(outputStream); + assertFalse(AbstractTestChunkManager.checkChunkFilesClosed(chunkPath)); } + // After close, the chunk file should be closed. 
+ assertTrue(AbstractTestChunkManager.checkChunkFilesClosed(chunkPath)); } OzoneManager ozoneManager = cluster.getOzoneManager(); @@ -387,6 +403,61 @@ public void testKeyHSyncThenClose() throws Exception { } } + private static String getChunkPathOnDataNode(FSDataOutputStream outputStream) + throws IOException { + String chunkPath; + KeyOutputStream groupOutputStream = + ((OzoneFSOutputStream) outputStream.getWrappedStream()).getWrappedOutputStream().getKeyOutputStream(); + List locationInfoList = + groupOutputStream.getLocationInfoList(); + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster); + chunkPath = dn.getDatanodeStateMachine() + .getContainer().getContainerSet() + .getContainer(omKeyLocationInfo.getContainerID()). + getContainerData().getChunksPath(); + return chunkPath; + } + + @Test + public void testHSyncSeek() throws Exception { + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s.%s/", + OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + final Path key1 = new Path(dir, "key-hsync-seek"); + + final byte[] data = new byte[1024]; + final byte[] buffer = new byte[1024]; + ThreadLocalRandom.current().nextBytes(data); + + try (FileSystem fs = FileSystem.get(CONF)) { + // Create key1 + try (FSDataOutputStream os = fs.create(key1, true)) { + os.write(data, 0, WAL_HEADER_LEN); + // the first hsync will update the correct length in the key info at OM + os.hsync(); + os.write(data, 0, data.length); + os.hsync(); // the second hsync will not update the length at OM + try (FSDataInputStream in = fs.open(key1)) { + // the actual key length is WAL_HEADER_LEN + 1024, but the length in OM is WAL_HEADER_LEN (83) + in.seek(WAL_HEADER_LEN + 1); + final int n = in.read(buffer, 1, buffer.length - 1); + // expect to read 1023 bytes + assertEquals(buffer.length - 1, n); + for (int i = 1; i < buffer.length; i++) { + assertEquals(data[i], buffer[i], "expected at i=" + i); + } + } + } finally { + fs.delete(key1, false); + } + } + } + @ParameterizedTest @ValueSource(booleans = {false, true}) public void testO3fsHSync(boolean incrementalChunkList) throws Exception { @@ -425,6 +496,52 @@ public void testOfsHSync(boolean incrementalChunkList) throws Exception { } } + @Test + public void testHSyncOpenKeyCommitAfterExpiry() throws Exception { + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final Path key1 = new Path("hsync-key"); + final Path key2 = new Path("key2"); + + try (FileSystem fs = FileSystem.get(CONF)) { + // Create key1 with hsync + try (FSDataOutputStream os = fs.create(key1, true)) { + os.write(1); + os.hsync(); + // Create key2 without hsync + try (FSDataOutputStream os1 = fs.create(key2, true)) { + os1.write(1); + // There should be 2 key in openFileTable + assertThat(2 == getOpenKeyInfo(BUCKET_LAYOUT).size()); + // One key will be in fileTable as hsynced + assertThat(1 == getKeyInfo(BUCKET_LAYOUT).size()); + + // Resume openKeyCleanupService + openKeyCleanupService.resume(); + // Verify hsync openKey gets committed eventually + // Key without hsync is deleted + GenericTestUtils.waitFor(() -> + 0 == 
getOpenKeyInfo(BUCKET_LAYOUT).size(), 1000, 12000); + // Verify only one key is still present in fileTable + assertThat(1 == getKeyInfo(BUCKET_LAYOUT).size()); + + // Clean up + assertTrue(fs.delete(key1, false)); + waitForEmptyDeletedTable(); + } catch (OMException ex) { + assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ex.getResult()); + } + } catch (OMException ex) { + assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ex.getResult()); + } finally { + openKeyCleanupService.suspend(); + } + } + } + @Test public void testHSyncDeletedKey() throws Exception { // Verify that a key can't be successfully hsync'ed again after it's deleted, @@ -524,6 +641,21 @@ private List getOpenKeyInfo(BucketLayout bucketLayout) { return omKeyInfo; } + private List getKeyInfo(BucketLayout bucketLayout) { + List omKeyInfo = new ArrayList<>(); + + Table openFileTable = + cluster.getOzoneManager().getMetadataManager().getKeyTable(bucketLayout); + try (TableIterator> + iterator = openFileTable.iterator()) { + while (iterator.hasNext()) { + omKeyInfo.add(iterator.next().getValue()); + } + } catch (Exception e) { + } + return omKeyInfo; + } + @Test public void testUncommittedBlocks() throws Exception { waitForEmptyDeletedTable(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java index 917ce57fe7d..624b5e02c14 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSyncUpgrade.java @@ -107,6 +107,8 @@ public void init() throws Exception { conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); // Reduce KeyDeletingService interval diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index a4a9bcff470..6a3a0eb5b67 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -120,6 +120,8 @@ public void init() throws IOException, InterruptedException, final BucketLayout layout = BucketLayout.FILE_SYSTEM_OPTIMIZED; conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java index f1cedf59c3a..8e8cc63a7d9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java @@ -122,8 +122,16 @@ 
void teardown() throws IOException { void fileSystemWithUnsupportedDefaultBucketLayout(String layout) { OzoneConfiguration conf = configWithDefaultBucketLayout(layout); - OMException e = assertThrows(OMException.class, - () -> FileSystem.newInstance(conf)); + OMException e = assertThrows(OMException.class, () -> { + FileSystem fileSystem = null; + try { + fileSystem = FileSystem.newInstance(conf); + } finally { + if (fileSystem != null) { + fileSystem.close(); + } + } + }); assertThat(e.getMessage()) .contains(ERROR_MAP.get(layout)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java index 649ed50a102..7b5a9580805 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; import com.google.common.collect.ImmutableList; +import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileSystem; @@ -39,6 +40,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; @@ -53,10 +55,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.TestDataUtil.createBucket; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * Test FileChecksum API. 
@@ -68,10 +73,16 @@ public class TestOzoneFileChecksum { true, false }; - private static final int[] DATA_SIZES = DoubleStream.of(0.5, 1, 1.5, 2, 7, 8) - .mapToInt(mb -> (int) (1024 * 1024 * mb)) + private static final int[] DATA_SIZES_1 = DoubleStream.of(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10) + .mapToInt(mb -> (int) (1024 * 1024 * mb) + 510000) .toArray(); + private static final int[] DATA_SIZES_2 = DoubleStream.of(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10) + .mapToInt(mb -> (int) (1024 * 1024 * mb) + 820000) + .toArray(); + + private int[] dataSizes = new int[DATA_SIZES_1.length + DATA_SIZES_2.length]; + private OzoneConfiguration conf; private MiniOzoneCluster cluster = null; private FileSystem fs; @@ -84,6 +95,8 @@ public class TestOzoneFileChecksum { void setup() throws IOException, InterruptedException, TimeoutException { conf = new OzoneConfiguration(); + conf.setStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, 1024 * 1024, StorageUnit.BYTES); + conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, 2 * 1024 * 1024, StorageUnit.BYTES); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) .build(); @@ -95,9 +108,8 @@ void setup() throws IOException, OzoneConsts.OZONE_OFS_URI_SCHEME); conf.setBoolean(disableCache, true); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - fs = FileSystem.get(conf); - ofs = (RootedOzoneFileSystem) fs; - adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter(); + System.arraycopy(DATA_SIZES_1, 0, dataSizes, 0, DATA_SIZES_1.length); + System.arraycopy(DATA_SIZES_2, 0, dataSizes, DATA_SIZES_1.length, DATA_SIZES_2.length); } @AfterEach @@ -112,9 +124,13 @@ void teardown() { * Test EC checksum with Replicated checksum. */ @ParameterizedTest - @MethodSource("missingIndexes") - void testEcFileChecksum(List missingIndexes) throws IOException { + @MethodSource("missingIndexesAndChecksumSize") + void testEcFileChecksum(List missingIndexes, double checksumSizeInMB) throws IOException { + conf.setInt("ozone.client.bytes.per.checksum", (int) (checksumSizeInMB * 1024 * 1024)); + fs = FileSystem.get(conf); + ofs = (RootedOzoneFileSystem) fs; + adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter(); String volumeName = UUID.randomUUID().toString(); String legacyBucket = UUID.randomUUID().toString(); String ecBucketName = UUID.randomUUID().toString(); @@ -139,7 +155,7 @@ void testEcFileChecksum(List missingIndexes) throws IOException { Map replicatedChecksums = new HashMap<>(); - for (int dataLen : DATA_SIZES) { + for (int dataLen : dataSizes) { byte[] data = randomAlphabetic(dataLen).getBytes(UTF_8); try (OutputStream file = adapter.createFile(volumeName + "/" @@ -170,7 +186,7 @@ void testEcFileChecksum(List missingIndexes) throws IOException { clientConf.setBoolean(OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, topologyAware); try (FileSystem fsForRead = FileSystem.get(clientConf)) { - for (int dataLen : DATA_SIZES) { + for (int dataLen : dataSizes) { // Compute checksum after failed DNs Path parent = new Path("/" + volumeName + "/" + ecBucketName + "/"); Path ecKey = new Path(parent, "test" + dataLen); @@ -187,14 +203,13 @@ void testEcFileChecksum(List missingIndexes) throws IOException { } } - static Stream> missingIndexes() { + static Stream missingIndexesAndChecksumSize() { return Stream.of( - ImmutableList.of(0, 1), - ImmutableList.of(1, 2), - ImmutableList.of(2, 3), - ImmutableList.of(3, 4), - ImmutableList.of(0, 3), - ImmutableList.of(0, 4) - ); + arguments(ImmutableList.of(0, 1), 0.001), + arguments(ImmutableList.of(1, 2), 
0.01), + arguments(ImmutableList.of(2, 3), 0.1), + arguments(ImmutableList.of(3, 4), 0.5), + arguments(ImmutableList.of(0, 3), 1), + arguments(ImmutableList.of(0, 4), 2)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index 8e0bd1ac7de..ccfa0625800 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -24,6 +24,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; import java.util.concurrent.atomic.AtomicInteger; @@ -49,10 +51,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.assertj.core.api.Assertions.assertThat; @@ -91,6 +96,8 @@ static void initClass() throws Exception { conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 20); + conf.setInt(OZONE_FS_LISTING_PAGE_SIZE, 30); // Start the cluster cluster = MiniOzoneCluster.newHABuilder(conf) @@ -289,6 +296,13 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { String snapshotPath2 = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snapshotName2; String snapshotKeyPath2 = snapshotPath2 + OM_KEY_PREFIX + key2; + List snapshotNames = new ArrayList<>(); + for (int i = 0; i < cluster.getConf().getInt(OZONE_FS_LISTING_PAGE_SIZE, + OZONE_FS_LISTING_PAGE_SIZE_DEFAULT) * 2; i++) { + snapshotNames.add(createSnapshot()); + } + String snapshotName3 = createSnapshot(); + int res = ToolRunner.run(shell, new String[]{"-deleteSnapshot", BUCKET_PATH, snapshotName1}); @@ -313,6 +327,10 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { assertThat(listSnapOut).doesNotContain(snapshotName1); assertThat(listSnapOut).contains(snapshotName2); + assertThat(listSnapOut).contains(snapshotName3); + for (String snapshotName : snapshotNames) { + assertThat(listSnapOut).contains(snapshotName); + } // Check for snapshot keys with "ozone fs -ls" String listSnapKeyOut = execShellCommandAndGetOutput(1, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java index b79c9a870e4..bce96251873 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java @@ -47,6 +47,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.assertj.core.api.Assumptions.assumeThat; @@ -93,6 +94,8 @@ protected static OzoneConfiguration createBaseConfiguration() { conf.addResource(CONTRACT_XML); + conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); return conf; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java index 4ff671df616..7a1366ad682 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java @@ -15,17 +15,13 @@ * the License. */ -package org.apache.hadoop.ozone.client.rpc; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocolPB. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java index 9db501edb72..4f6927d6fae 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java @@ -18,16 +18,13 @@ import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancer; import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancerConfiguration; import org.apache.hadoop.hdds.scm.container.balancer.IllegalContainerBalancerStateException; import org.apache.hadoop.hdds.scm.container.balancer.InvalidContainerBalancerConfigurationException; -import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; @@ -37,7 +34,6 @@ import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.ozone.test.GenericTestUtils; @@ -47,12 +43,9 @@ import org.slf4j.event.Level; import java.io.IOException; -import java.util.Map; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerBalancerConfigurationProto; -import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; -import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -148,90 +141,6 @@ public void testFailover() throws Exception { .contains("Performing failover to suggested leader"); } - @Test - public void testMoveFailover() throws Exception { - SCMClientConfig scmClientConfig = - conf.getObject(SCMClientConfig.class); - scmClientConfig.setRetryCount(1); - scmClientConfig.setRetryInterval(100); - scmClientConfig.setMaxRetryTimeout(1500); - assertEquals(15, scmClientConfig.getRetryCount()); - conf.setFromObject(scmClientConfig); - StorageContainerManager scm = getLeader(cluster); - assertNotNull(scm); - - final ContainerID id = - getContainer(HddsProtos.LifeCycleState.CLOSED).containerID(); - DatanodeDetails dn1 = randomDatanodeDetails(); - DatanodeDetails dn2 = randomDatanodeDetails(); - - //here we just want to test whether the new leader will get the same - //inflight move after failover, so no need to create container and datanode, - //just mock them bypassing all the pre checks. 
- scm.getReplicationManager().getMoveScheduler().startMove(id.getProtobuf(), - (new MoveDataNodePair(dn1, dn2)) - .getProtobufMessage(ClientVersion.CURRENT_VERSION)); - - SCMBlockLocationFailoverProxyProvider failoverProxyProvider = - new SCMBlockLocationFailoverProxyProvider(conf); - failoverProxyProvider.changeCurrentProxy(scm.getSCMNodeId()); - ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient = - new ScmBlockLocationProtocolClientSideTranslatorPB( - failoverProxyProvider); - GenericTestUtils - .setLogLevel(SCMBlockLocationFailoverProxyProvider.LOG, Level.DEBUG); - GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer - .captureLogs(SCMBlockLocationFailoverProxyProvider.LOG); - ScmBlockLocationProtocol scmBlockLocationProtocol = TracingUtil - .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class, - conf); - scmBlockLocationProtocol.getScmInfo(); - assertThat(logCapture.getOutput()) - .contains("Performing failover to suggested leader"); - scm = getLeader(cluster); - assertNotNull(scm); - - //switch to the new leader successfully, new leader should - //get the same inflightMove - Map inflightMove = - scm.getReplicationManager().getMoveScheduler().getInflightMove(); - assertThat(inflightMove).containsKey(id); - MoveDataNodePair mp = inflightMove.get(id); - assertEquals(dn2, mp.getTgt()); - assertEquals(dn1, mp.getSrc()); - - //complete move in the new leader - scm.getReplicationManager().getMoveScheduler() - .completeMove(id.getProtobuf()); - - - SCMContainerLocationFailoverProxyProvider proxyProvider = - new SCMContainerLocationFailoverProxyProvider(conf, null); - GenericTestUtils.setLogLevel(SCMContainerLocationFailoverProxyProvider.LOG, - Level.DEBUG); - logCapture = GenericTestUtils.LogCapturer - .captureLogs(SCMContainerLocationFailoverProxyProvider.LOG); - proxyProvider.changeCurrentProxy(scm.getSCMNodeId()); - StorageContainerLocationProtocol scmContainerClient = - TracingUtil.createProxy( - new StorageContainerLocationProtocolClientSideTranslatorPB( - proxyProvider), StorageContainerLocationProtocol.class, conf); - - scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, "ozone"); - assertThat(logCapture.getOutput()) - .contains("Performing failover to suggested leader"); - - //switch to the new leader successfully, new leader should - //get the same inflightMove , which should not contains - //that container. - scm = getLeader(cluster); - assertNotNull(scm); - inflightMove = scm.getReplicationManager() - .getMoveScheduler().getInflightMove(); - assertThat(inflightMove).doesNotContainKey(id); - } - /** * Starts ContainerBalancer when the cluster is already balanced. 
* ContainerBalancer will identify that no unbalanced nodes are present and diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java index 400c4868a99..e90c576e8dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java @@ -128,15 +128,14 @@ private DBCheckpoint downloadSnapshot() throws Exception { public void testInstallCheckPoint() throws Exception { DBCheckpoint checkpoint = downloadSnapshot(); StorageContainerManager scm = cluster.getStorageContainerManager(); - DBStore db = HAUtils - .loadDB(conf, checkpoint.getCheckpointLocation().getParent().toFile(), - checkpoint.getCheckpointLocation().getFileName().toString(), - new SCMDBDefinition()); + final Path location = checkpoint.getCheckpointLocation(); + final DBStore db = HAUtils.loadDB(conf, location.getParent().toFile(), + location.getFileName().toString(), SCMDBDefinition.get()); // Hack the transaction index in the checkpoint so as to ensure the // checkpointed transaction index is higher than when it was downloaded // from. assertNotNull(db); - HAUtils.getTransactionInfoTable(db, new SCMDBDefinition()) + HAUtils.getTransactionInfoTable(db, SCMDBDefinition.get()) .put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(10, 100)); db.close(); ContainerID cid = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java index 10492736144..e55355525a6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java @@ -224,9 +224,8 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { DBCheckpoint leaderDbCheckpoint = leaderSCM.getScmMetadataStore().getStore() .getCheckpoint(false); Path leaderCheckpointLocation = leaderDbCheckpoint.getCheckpointLocation(); - TransactionInfo leaderCheckpointTrxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation, - new SCMDBDefinition()); + final TransactionInfo leaderCheckpointTrxnInfo = HAUtils.getTrxnInfoFromCheckpoint( + conf, leaderCheckpointLocation, SCMDBDefinition.get()); assertNotNull(leaderCheckpointLocation); // Take a backup of the current DB diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java index fb92d91ee71..45458182344 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.security.exception.SCMSecretKeyException; import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.minikdc.MiniKdc; @@ -67,7 
+66,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; -import static org.apache.hadoop.hdds.security.exception.SCMSecretKeyException.ErrorCode.SECRET_KEY_NOT_ENABLED; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getSecretKeyClientForDatanode; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; @@ -245,24 +243,14 @@ public void testSecretKeyApiSuccess() throws Exception { } /** - * Verify API behavior when block token is not enable. + * Verify API behavior. */ @Test - public void testSecretKeyApiNotEnabled() throws Exception { + public void testSecretKeyApi() throws Exception { startCluster(1); SecretKeyProtocol secretKeyProtocol = getSecretKeyProtocol(); - - SCMSecretKeyException ex = assertThrows(SCMSecretKeyException.class, - secretKeyProtocol::getCurrentSecretKey); - assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); - - ex = assertThrows(SCMSecretKeyException.class, - () -> secretKeyProtocol.getSecretKey(UUID.randomUUID())); - assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); - - ex = assertThrows(SCMSecretKeyException.class, - secretKeyProtocol::getAllSecretKeys); - assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); + assertNull(secretKeyProtocol.getSecretKey(UUID.randomUUID())); + assertEquals(1, secretKeyProtocol.getAllSecretKeys().size()); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index 95d7faa9174..94c8f914294 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventExecutor; -import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.server.events.FixedThreadPoolWithAffinityExecutor; import org.apache.hadoop.hdds.utils.HddsVersionInfo; @@ -155,7 +154,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; @@ -788,10 +786,6 @@ public void testCloseContainerCommandOnRestart() throws Exception { NodeManager nodeManager = mock(NodeManager.class); setInternalState(rm, "nodeManager", nodeManager); - EventPublisher publisher = mock(EventPublisher.class); - setInternalState(rm.getLegacyReplicationManager(), - "eventPublisher", publisher); - UUID dnUuid = cluster.getHddsDatanodes().iterator().next() .getDatanodeDetails().getUuid(); @@ -811,15 +805,7 @@ public void testCloseContainerCommandOnRestart() throws Exception { cluster.getStorageContainerManager() 
.getReplicationManager().processAll(); Thread.sleep(5000); - - if (rm.getConfig().isLegacyEnabled()) { - CommandForDatanode commandForDatanode = new CommandForDatanode( - dnUuid, closeContainerCommand); - verify(publisher).fireEvent(eq(SCMEvents.DATANODE_COMMAND), argThat(new - CloseContainerCommandMatcher(dnUuid, commandForDatanode))); - } else { - verify(nodeManager).addDatanodeCommand(dnUuid, closeContainerCommand); - } + verify(nodeManager).addDatanodeCommand(dnUuid, closeContainerCommand); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java index 2986484d2ad..2f9c8c938a3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.ha.SCMHAMetrics; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -43,9 +44,10 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.statemachine.SnapshotInfo; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.time.Instant; @@ -54,7 +56,9 @@ import java.util.List; import java.util.Set; import java.util.UUID; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; @@ -72,6 +76,8 @@ @Timeout(300) public class TestStorageContainerManagerHA { + private static final Logger LOG = LoggerFactory.getLogger(TestStorageContainerManagerHA.class); + private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; private String omServiceId; @@ -86,7 +92,6 @@ public class TestStorageContainerManagerHA { * * @throws IOException */ - @BeforeEach public void init() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); @@ -118,6 +123,7 @@ public void shutdown() { @Test void testAllSCMAreRunning() throws Exception { + init(); int count = 0; List scms = cluster.getStorageContainerManagers(); assertEquals(numOfSCMs, scms.size()); @@ -129,6 +135,9 @@ void testAllSCMAreRunning() throws Exception { count++; leaderScm = scm; } + if (SCMHAUtils.isSCMHAEnabled(conf)) { + assertNotNull(scm.getScmHAManager().getRatisServer().getLeaderId()); + } assertEquals(peerSize, numOfSCMs); } assertEquals(1, count); @@ -246,6 +255,7 @@ private boolean areAllScmInSync(long leaderIndex) { @Test public void testPrimordialSCM() throws Exception { + init(); StorageContainerManager scm1 = cluster.getStorageContainerManagers().get(0); StorageContainerManager scm2 = cluster.getStorageContainerManagers().get(1); OzoneConfiguration conf1 = scm1.getConfiguration(); @@ -264,6 +274,7 @@ public void testPrimordialSCM() 
throws Exception { @Test public void testBootStrapSCM() throws Exception { + init(); StorageContainerManager scm2 = cluster.getStorageContainerManagers().get(1); OzoneConfiguration conf2 = scm2.getConfiguration(); boolean isDeleted = scm2.getScmStorageConfig().getVersionFile().delete(); @@ -323,4 +334,72 @@ private void waitForLeaderToBeReady() }, 1000, (int) ScmConfigKeys .OZONE_SCM_HA_RATIS_LEADER_READY_WAIT_TIMEOUT_DEFAULT); } + + @Test + public void testSCMLeadershipMetric() throws IOException, InterruptedException { + // GIVEN + int scmInstancesCount = 3; + conf = new OzoneConfiguration(); + MiniOzoneHAClusterImpl.Builder haMiniClusterBuilder = MiniOzoneCluster.newHABuilder(conf) + .setSCMServiceId("scm-service-id") + .setOMServiceId("om-service-id") + .setNumOfActiveOMs(0) + .setNumOfStorageContainerManagers(scmInstancesCount) + .setNumOfActiveSCMs(1); + haMiniClusterBuilder.setNumDatanodes(0); + + // start single SCM instance without other Ozone services + // in order to initialize and bootstrap SCM instances only + cluster = haMiniClusterBuilder.build(); + + List storageContainerManagersList = cluster.getStorageContainerManagersList(); + + // stop the single SCM instance in order to imitate further simultaneous start of SCMs + storageContainerManagersList.get(0).stop(); + storageContainerManagersList.get(0).join(); + + // WHEN (imitate simultaneous start of the SCMs) + int retryCount = 0; + while (true) { + CountDownLatch scmInstancesCounter = new CountDownLatch(scmInstancesCount); + AtomicInteger failedSCMs = new AtomicInteger(); + for (StorageContainerManager scm : storageContainerManagersList) { + new Thread(() -> { + try { + scm.start(); + } catch (IOException e) { + failedSCMs.incrementAndGet(); + } finally { + scmInstancesCounter.countDown(); + } + }).start(); + } + scmInstancesCounter.await(); + if (failedSCMs.get() == 0) { + break; + } else { + for (StorageContainerManager scm : storageContainerManagersList) { + scm.stop(); + scm.join(); + LOG.info("Stopping StorageContainerManager server at {}", + scm.getClientRpcAddress()); + } + ++retryCount; + LOG.info("SCMs port conflicts, retried {} times", + retryCount); + failedSCMs.set(0); + } + } + + // THEN expect only one SCM node (leader) will have 'scmha_metrics_scmha_leader_state' metric set to 1 + int leaderCount = 0; + for (StorageContainerManager scm : storageContainerManagersList) { + if (scm.getScmHAMetrics() != null && scm.getScmHAMetrics().getSCMHAMetricsInfoLeaderState() == 1) { + leaderCount++; + break; + } + } + assertEquals(1, leaderCount); + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java similarity index 98% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java index bec14b23b0f..4c950e7d725 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.client.rpc; +package org.apache.hadoop.hdds.scm; import java.io.IOException; import java.io.OutputStream; @@ -35,12 +35,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java index 95a0b0e17fd..83a9b106c8f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java @@ -29,16 +29,16 @@ import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import java.io.IOException; -import java.util.UUID; +import java.nio.file.Path; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; @@ -82,12 +82,10 @@ public static void shutdown() { @ParameterizedTest(name = "Ozone security enabled: {0}") @ValueSource(booleans = {false, true}) - public void testCaching(boolean securityEnabled) throws IOException { + public void testCaching(boolean securityEnabled, @TempDir Path metaDir) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, securityEnabled); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); + conf.set(HDDS_METADATA_DIR_NAME, metaDir.toString()); ClientTrustManager trustManager = mock(ClientTrustManager.class); try (XceiverClientManager clientManager = new XceiverClientManager(conf, @@ -124,13 +122,11 @@ public void testCaching(boolean securityEnabled) throws IOException { } @Test - public void testFreeByReference() throws IOException { + public void testFreeByReference(@TempDir Path metaDir) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class); clientConfig.setMaxSize(1); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); + conf.set(HDDS_METADATA_DIR_NAME, metaDir.toString()); try (XceiverClientManager clientManager = new XceiverClientManager(conf, 
clientConfig, null)) { Cache cache = @@ -181,13 +177,11 @@ public void testFreeByReference() throws IOException { } @Test - public void testFreeByEviction() throws IOException { + public void testFreeByEviction(@TempDir Path metaDir) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class); clientConfig.setMaxSize(1); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); + conf.set(HDDS_METADATA_DIR_NAME, metaDir.toString()); try (XceiverClientManager clientManager = new XceiverClientManager(conf, clientConfig, null)) { Cache cache = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java index c4f62040536..d789112e471 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java @@ -22,9 +22,9 @@ import static org.apache.ozone.test.MetricsAsserts.getLongCounter; import static org.apache.ozone.test.MetricsAsserts.getMetrics; +import java.nio.file.Path; import java.util.List; import java.util.ArrayList; -import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; @@ -41,10 +41,12 @@ import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; /** * This class tests the metrics of XceiverClient. 
@@ -76,11 +78,10 @@ public static void shutdown() { } @Test - public void testMetrics() throws Exception { + @Flaky("HDDS-11646") + public void testMetrics(@TempDir Path metaDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); + conf.set(HDDS_METADATA_DIR_NAME, metaDir.toString()); try (XceiverClientManager clientManager = new XceiverClientManager(conf)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java index 75d860d951b..4a9efceeb7b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -62,16 +61,15 @@ public void init() * * @throws Exception */ - @Flaky("HDDS-11359") @Test public void testPipelineInfo() throws Exception { ObjectName bean = new ObjectName( "Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo"); - Map pipelineStateCount = cluster - .getStorageContainerManager().getPipelineManager().getPipelineInfo(); GenericTestUtils.waitFor(() -> { try { + Map pipelineStateCount = cluster + .getStorageContainerManager().getPipelineManager().getPipelineInfo(); final TabularData data = (TabularData) mbs.getAttribute( bean, "PipelineInfo"); for (Map.Entry entry : pipelineStateCount.entrySet()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index c274d8fea30..1b7eb837cf8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -61,6 +61,7 @@ import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.SecretKeyTestClient; +import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry; import org.apache.hadoop.ozone.client.io.InsufficientLocationsException; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -83,6 +84,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; @@ -99,6 +101,7 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.UUID; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; @@ -117,6 +120,7 @@ import static 
org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * This class tests container commands on EC containers. @@ -456,7 +460,7 @@ public void testCreateRecoveryContainer() throws Exception { int replicaIndex = 4; XceiverClientSpi dnClient = xceiverClientManager.acquireClient( createSingleNodePipeline(newPipeline, newPipeline.getNodes().get(0), - replicaIndex)); + 2)); try { // To create the actual situation, container would have been in closed // state at SCM. @@ -613,30 +617,33 @@ private static byte[] getBytesWith(int singleDigitNumber, int total) { @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWith(List missingIndexes) + void testECReconstructionCoordinatorWith(List missingIndexes, boolean triggerRetry) throws Exception { - testECReconstructionCoordinator(missingIndexes, 3); + testECReconstructionCoordinator(missingIndexes, 3, triggerRetry); } @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWithPartialStripe(List missingIndexes) - throws Exception { - testECReconstructionCoordinator(missingIndexes, 1); + void testECReconstructionCoordinatorWithPartialStripe(List missingIndexes, + boolean triggerRetry) throws Exception { + testECReconstructionCoordinator(missingIndexes, 1, triggerRetry); } @ParameterizedTest @MethodSource("recoverableMissingIndexes") - void testECReconstructionCoordinatorWithFullAndPartialStripe(List missingIndexes) - throws Exception { - testECReconstructionCoordinator(missingIndexes, 4); + void testECReconstructionCoordinatorWithFullAndPartialStripe(List missingIndexes, + boolean triggerRetry) throws Exception { + testECReconstructionCoordinator(missingIndexes, 4, triggerRetry); } - static Stream> recoverableMissingIndexes() { - return Stream - .concat(IntStream.rangeClosed(1, 5).mapToObj(ImmutableList::of), Stream - .of(ImmutableList.of(2, 3), ImmutableList.of(2, 4), - ImmutableList.of(3, 5), ImmutableList.of(4, 5))); + static Stream recoverableMissingIndexes() { + Stream args = IntStream.rangeClosed(1, 5).mapToObj(i -> arguments(ImmutableList.of(i), true)); + Stream args1 = IntStream.rangeClosed(1, 5).mapToObj(i -> arguments(ImmutableList.of(i), false)); + Stream args2 = Stream.of(arguments(ImmutableList.of(2, 3), true), + arguments(ImmutableList.of(2, 4), true), arguments(ImmutableList.of(3, 5), true)); + Stream args3 = Stream.of(arguments(ImmutableList.of(2, 3), false), + arguments(ImmutableList.of(2, 4), false), arguments(ImmutableList.of(3, 5), false)); + return Stream.concat(Stream.concat(args, args1), Stream.concat(args2, args3)); } /** @@ -647,7 +654,7 @@ static Stream> recoverableMissingIndexes() { public void testECReconstructionCoordinatorWithMissingIndexes135() { InsufficientLocationsException exception = assertThrows(InsufficientLocationsException.class, () -> { - testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3); + testECReconstructionCoordinator(ImmutableList.of(1, 3, 5), 3, false); }); String expectedMessage = @@ -658,7 +665,7 @@ public void testECReconstructionCoordinatorWithMissingIndexes135() { } private void testECReconstructionCoordinator(List missingIndexes, - int numInputChunks) throws Exception { + int numInputChunks, boolean triggerRetry) throws Exception { ObjectStore objectStore = rpcClient.getObjectStore(); String keyString = 
UUID.randomUUID().toString(); String volumeName = UUID.randomUUID().toString(); @@ -667,7 +674,7 @@ private void testECReconstructionCoordinator(List missingIndexes, objectStore.getVolume(volumeName).createBucket(bucketName); OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket bucket = volume.getBucket(bucketName); - createKeyAndWriteData(keyString, bucket, numInputChunks); + createKeyAndWriteData(keyString, bucket, numInputChunks, triggerRetry); try ( XceiverClientManager xceiverClientManager = @@ -779,7 +786,7 @@ private void testECReconstructionCoordinator(List missingIndexes, .getReplicationConfig(), cToken); assertEquals(blockDataArrList.get(i).length, reconstructedBlockData.length); - checkBlockData(blockDataArrList.get(i), reconstructedBlockData); + checkBlockDataWithRetry(blockDataArrList.get(i), reconstructedBlockData, triggerRetry); XceiverClientSpi client = xceiverClientManager.acquireClient( newTargetPipeline); try { @@ -800,7 +807,7 @@ private void testECReconstructionCoordinator(List missingIndexes, } private void createKeyAndWriteData(String keyString, OzoneBucket bucket, - int numChunks) throws IOException { + int numChunks, boolean triggerRetry) throws IOException { for (int i = 0; i < numChunks; i++) { inputChunks[i] = getBytesWith(i + 1, EC_CHUNK_SIZE); } @@ -809,11 +816,48 @@ private void createKeyAndWriteData(String keyString, OzoneBucket bucket, new HashMap<>())) { assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); for (int i = 0; i < numChunks; i++) { + // We generally wait until the data is written to the last chunk + // before attempting to trigger CloseContainer. + // We use an asynchronous approach for this trigger, + // aiming to ensure that closing the container does not interfere with the write operation. + // However, this process often needs to be executed multiple times before it takes effect. 
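+      // In practice the close is only issued against the first replica in the pipeline's
+      // replica-index map (triggerRetryByCloseContainer breaks out of its loop after one
+      // datanode), the assumption being that a single closed replica is enough for the
+      // in-flight write to see a container-closed response and go through the client's
+      // retry path, which is what the triggerRetry variants of these tests exercise.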
+ if (i == numChunks - 1 && triggerRetry) { + triggerRetryByCloseContainer(out); + } out.write(inputChunks[i]); } } } + private void triggerRetryByCloseContainer(OzoneOutputStream out) { + CompletableFuture.runAsync(() -> { + BlockOutputStreamEntry blockOutputStreamEntry = out.getKeyOutputStream().getStreamEntries().get(0); + BlockID entryBlockID = blockOutputStreamEntry.getBlockID(); + long entryContainerID = entryBlockID.getContainerID(); + Pipeline entryPipeline = blockOutputStreamEntry.getPipeline(); + Map replicaIndexes = entryPipeline.getReplicaIndexes(); + try { + for (Map.Entry entry : replicaIndexes.entrySet()) { + DatanodeDetails key = entry.getKey(); + Integer value = entry.getValue(); + XceiverClientManager xceiverClientManager = new XceiverClientManager(config); + Token cToken = containerTokenGenerator + .generateToken(ANY_USER, ContainerID.valueOf(entryContainerID)); + XceiverClientSpi client = xceiverClientManager.acquireClient( + createSingleNodePipeline(entryPipeline, key, value)); + try { + ContainerProtocolCalls.closeContainer(client, entryContainerID, cToken.encodeToUrlString()); + } finally { + xceiverClientManager.releaseClient(client, false); + } + break; + } + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + @Test public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() throws Exception { @@ -826,7 +870,7 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() objectStore.getVolume(volumeName).createBucket(bucketName); OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket bucket = volume.getBucket(bucketName); - createKeyAndWriteData(keyString, bucket, 3); + createKeyAndWriteData(keyString, bucket, 3, false); OzoneKeyDetails key = bucket.getKey(keyString); long conID = key.getOzoneKeyLocations().get(0).getContainerID(); @@ -900,6 +944,25 @@ private void closeContainer(long conID) HddsProtos.LifeCycleEvent.CLOSE); } + private void checkBlockDataWithRetry( + org.apache.hadoop.ozone.container.common.helpers.BlockData[] blockData, + org.apache.hadoop.ozone.container.common.helpers.BlockData[] + reconstructedBlockData, boolean triggerRetry) { + if (triggerRetry) { + for (int i = 0; i < reconstructedBlockData.length; i++) { + assertEquals(blockData[i].getBlockID(), reconstructedBlockData[i].getBlockID()); + List oldBlockDataChunks = blockData[i].getChunks(); + List newBlockDataChunks = reconstructedBlockData[i].getChunks(); + for (int j = 0; j < newBlockDataChunks.size(); j++) { + ContainerProtos.ChunkInfo chunkInfo = oldBlockDataChunks.get(j); + assertEquals(chunkInfo, newBlockDataChunks.get(j)); + } + } + return; + } + checkBlockData(blockData, reconstructedBlockData); + } + private void checkBlockData( org.apache.hadoop.ozone.container.common.helpers.BlockData[] blockData, org.apache.hadoop.ozone.container.common.helpers.BlockData[] @@ -967,8 +1030,7 @@ public static void prepareData(int[][] ranges) throws Exception { out.write(values[i]); } } -// List containerIDs = -// new ArrayList<>(scm.getContainerManager().getContainerIDs()); + List containerIDs = scm.getContainerManager().getContainers() .stream() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 9c76c0ec0c7..ff55ee83c17 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -35,11 +35,14 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.recon.ReconServer; +import org.apache.hadoop.ozone.s3.Gateway; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.function.CheckedFunction; +import com.amazonaws.services.s3.AmazonS3; + /** * Interface used for MiniOzoneClusters. */ @@ -142,10 +145,17 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, /** * Returns a {@link ReconServer} instance. * - * @return List of {@link ReconServer} + * @return {@link ReconServer} instance if it is initialized, otherwise null. */ ReconServer getReconServer(); + /** + * Returns a {@link Gateway} instance. + * + * @return {@link Gateway} instance if it is initialized, otherwise null. + */ + Gateway getS3G(); + /** * Returns an {@link OzoneClient} to access the {@link MiniOzoneCluster}. * The caller is responsible for closing the client after use. @@ -154,6 +164,11 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, */ OzoneClient newClient() throws IOException; + /** + * Returns an {@link AmazonS3} to access the {@link MiniOzoneCluster}. + */ + AmazonS3 newS3Client(); + /** * Returns StorageContainerLocationClient to communicate with * {@link StorageContainerManager} associated with the MiniOzoneCluster. @@ -219,6 +234,21 @@ void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode) */ void stopRecon(); + /** + * Start S3G. + */ + void startS3G(); + + /** + * Restart S3G. + */ + void restartS3G(); + + /** + * Stop S3G. + */ + void stopS3G(); + /** * Shutdown the MiniOzoneCluster and delete the storage dirs. */ @@ -273,6 +303,7 @@ abstract class Builder { protected String omId = UUID.randomUUID().toString(); protected boolean includeRecon = false; + protected boolean includeS3G = false; protected int dnInitialVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); protected int dnCurrentVersion = DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue(); @@ -382,6 +413,11 @@ public Builder includeRecon(boolean include) { return this; } + public Builder includeS3G(boolean include) { + this.includeS3G = include; + return this; + } + /** * Constructs and returns MiniOzoneCluster. 
* diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 50013b57f4c..3594996856a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -32,6 +32,14 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.regions.Regions; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -58,6 +66,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; +import org.apache.hadoop.hdds.server.http.HttpConfig; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.hdds.utils.db.CodecTestUtil; @@ -73,6 +82,10 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.recon.ConfigurationProvider; import org.apache.hadoop.ozone.recon.ReconServer; +import org.apache.hadoop.ozone.s3.Gateway; +import org.apache.hadoop.ozone.s3.OzoneClientCache; +import org.apache.hadoop.ozone.s3.OzoneConfigurationHolder; +import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; @@ -84,9 +97,14 @@ import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD; +import static org.apache.hadoop.hdds.server.http.HttpConfig.getHttpPolicy; +import static org.apache.hadoop.hdds.server.http.HttpServer2.HTTPS_SCHEME; +import static org.apache.hadoop.hdds.server.http.HttpServer2.HTTP_SCHEME; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; @@ -120,6 +138,7 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private OzoneManager ozoneManager; private final List hddsDatanodes; private ReconServer reconServer; + private Gateway s3g; // Timeout for the cluster to be ready private int 
waitForClusterToBeReadyTimeout = 120000; // 2 min @@ -136,13 +155,15 @@ private MiniOzoneClusterImpl(OzoneConfiguration conf, OzoneManager ozoneManager, StorageContainerManager scm, List hddsDatanodes, - ReconServer reconServer) { + ReconServer reconServer, + Gateway s3g) { this.conf = conf; this.ozoneManager = ozoneManager; this.scm = scm; this.hddsDatanodes = hddsDatanodes; this.reconServer = reconServer; this.scmConfigurator = scmConfigurator; + this.s3g = s3g; } /** @@ -268,6 +289,11 @@ public ReconServer getReconServer() { return this.reconServer; } + @Override + public Gateway getS3G() { + return this.s3g; + } + @Override public int getHddsDatanodeIndex(DatanodeDetails dn) throws IOException { for (HddsDatanodeService service : hddsDatanodes) { @@ -286,6 +312,54 @@ public OzoneClient newClient() throws IOException { return client; } + @Override + public AmazonS3 newS3Client() { + // TODO: Parameterize tests between Virtual host style and Path style + return createS3Client(true); + } + + public AmazonS3 createS3Client(boolean enablePathStyle) { + final String accessKey = "user"; + final String secretKey = "password"; + final Regions region = Regions.DEFAULT_REGION; + + final String protocol; + final HttpConfig.Policy webPolicy = getHttpPolicy(conf); + String host; + + if (webPolicy.isHttpsEnabled()) { + protocol = HTTPS_SCHEME; + host = conf.get(OZONE_S3G_HTTPS_ADDRESS_KEY); + } else { + protocol = HTTP_SCHEME; + host = conf.get(OZONE_S3G_HTTP_ADDRESS_KEY); + } + + String endpoint = protocol + "://" + host; + + AWSCredentialsProvider credentials = new AWSStaticCredentialsProvider( + new BasicAWSCredentials(accessKey, secretKey) + ); + + + ClientConfiguration clientConfiguration = new ClientConfiguration(); + LOG.info("S3 Endpoint is {}", endpoint); + + AmazonS3 s3Client = + AmazonS3ClientBuilder.standard() + .withPathStyleAccessEnabled(enablePathStyle) + .withEndpointConfiguration( + new AwsClientBuilder.EndpointConfiguration( + endpoint, region.getName() + ) + ) + .withClientConfiguration(clientConfiguration) + .withCredentials(credentials) + .build(); + + return s3Client; + } + protected OzoneClient createClient() throws IOException { return OzoneClientFactory.getRpcClient(conf); } @@ -428,6 +502,7 @@ public void stop() { stopDatanodes(hddsDatanodes); stopSCM(scm); stopRecon(reconServer); + stopS3G(s3g); } private void startHddsDatanode(HddsDatanodeService datanode) { @@ -467,6 +542,23 @@ public void stopRecon() { stopRecon(reconServer); } + @Override + public void startS3G() { + s3g = new Gateway(); + s3g.execute(NO_ARGS); + } + + @Override + public void restartS3G() { + stopS3G(s3g); + startS3G(); + } + + @Override + public void stopS3G() { + stopS3G(s3g); + } + private CertificateClient getCAClient() { return this.caClient; } @@ -521,6 +613,19 @@ private static void stopRecon(ReconServer reconServer) { } } + private static void stopS3G(Gateway s3g) { + try { + if (s3g != null) { + LOG.info("Stopping S3G"); + // TODO (HDDS-11539): Remove this workaround once the @PreDestroy issue is fixed + OzoneClientCache.closeClient(); + s3g.stop(); + } + } catch (Exception e) { + LOG.error("Exception while shutting down S3 Gateway.", e); + } + } + /** * Builder for configuring the MiniOzoneCluster to run. 
*/ @@ -544,15 +649,17 @@ public MiniOzoneCluster build() throws IOException { OzoneManager om = null; ReconServer reconServer = null; List hddsDatanodes = Collections.emptyList(); + Gateway s3g = null; try { scm = createAndStartSingleSCM(); om = createAndStartSingleOM(); + s3g = createS3G(); reconServer = createRecon(); hddsDatanodes = createHddsDatanodes(); MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, scmConfigurator, om, scm, - hddsDatanodes, reconServer); + hddsDatanodes, reconServer, s3g); cluster.setCAClient(certClient); cluster.setSecretKeyClient(secretKeyClient); @@ -567,6 +674,9 @@ public MiniOzoneCluster build() throws IOException { if (includeRecon) { stopRecon(reconServer); } + if (includeS3G) { + stopS3G(s3g); + } if (startDataNodes) { stopDatanodes(hddsDatanodes); } @@ -740,6 +850,16 @@ protected ReconServer createRecon() { return reconServer; } + protected Gateway createS3G() { + Gateway s3g = null; + if (includeS3G) { + configureS3G(); + s3g = new Gateway(); + s3g.execute(NO_ARGS); + } + return s3g; + } + /** * Creates HddsDatanodeService(s) instance. * @@ -806,5 +926,14 @@ protected void configureRecon() { ConfigurationProvider.setConfiguration(conf); } + private void configureS3G() { + OzoneConfigurationHolder.resetConfiguration(); + + conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY, localhostWithFreePort()); + conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTPS_ADDRESS_KEY, localhostWithFreePort()); + + OzoneConfigurationHolder.setConfiguration(conf); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index 26c1868084f..625cf15ea56 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -46,12 +46,12 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -109,9 +109,10 @@ public final class TestBlockTokens { private static final int EXPIRY_DURATION_IN_MS = 10000; private static final int ROTATION_CHECK_DURATION_IN_MS = 100; + @TempDir + private static File workDir; private static MiniKdc miniKdc; private static OzoneConfiguration conf; - private static File workDir; private static File ozoneKeytab; private static File spnegoKeytab; private static File testUserKeytab; @@ -129,9 +130,6 @@ public static void init() throws Exception { ExitUtils.disableSystemExit(); - workDir = - GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName()); - startMiniKdc(); setSecureConfig(); createCredentialsInKDC(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java index 87242cb2790..038248945a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -33,6 +33,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,10 +87,12 @@ public final class TestBlockTokensCLI { private static final Logger LOG = LoggerFactory .getLogger(TestBlockTokensCLI.class); + + @TempDir + private static File workDir; private static MiniKdc miniKdc; private static OzoneAdmin ozoneAdmin; private static OzoneConfiguration conf; - private static File workDir; private static File ozoneKeytab; private static File spnegoKeytab; private static String host; @@ -100,13 +103,12 @@ public final class TestBlockTokensCLI { @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + ozoneAdmin = new OzoneAdmin(); + conf = ozoneAdmin.getOzoneConf(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); ExitUtils.disableSystemExit(); - workDir = - GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName()); omServiceId = "om-service-test"; scmServiceId = "scm-service-test"; @@ -116,7 +118,6 @@ public static void init() throws Exception { setSecretKeysConfig(); startCluster(); client = cluster.newClient(); - ozoneAdmin = new OzoneAdmin(conf); } @AfterAll diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index cbd1829ef0c..798e8a15991 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -77,6 +77,7 @@ public static void setup() throws Exception { ozoneConf = new OzoneConfiguration(); ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); + ozoneConf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, "1"); cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build(); storageClient = new ContainerOperationClient(ozoneConf); cluster.waitForClusterToBeReady(); @@ -144,6 +145,24 @@ public void testCreate() throws Exception { .getContainerID()); } + /** + * Test to try to list number of containers over the max number Ozone allows. + * @throws Exception + */ + @Test + public void testListContainerExceedMaxAllowedCountOperations() throws Exception { + // create 2 containers in cluster where the limit of max count for + // listing container is set to 1 + for (int i = 0; i < 2; i++) { + storageClient.createContainer(HddsProtos + .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor + .ONE, OzoneConsts.OZONE); + } + + assertEquals(1, storageClient.listContainer(0, 2) + .getContainerInfoList().size()); + } + /** * A simple test to get Pipeline with {@link ContainerOperationClient}. 
* @throws Exception diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index 4488e467c29..eb54e4a2519 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -164,21 +164,57 @@ private static BucketLayout getDefaultBucketLayout(OzoneClient client) { public static OzoneBucket createBucket(OzoneClient client, String vol, BucketArgs bucketArgs, String bukName) throws IOException { + return createBucket(client, vol, bucketArgs, bukName, false); + } + + public static OzoneBucket createBucket(OzoneClient client, + String vol, BucketArgs bucketArgs, String bukName, + boolean createLinkedBucket) + throws IOException { ObjectStore objectStore = client.getObjectStore(); OzoneVolume volume = objectStore.getVolume(vol); - volume.createBucket(bukName, bucketArgs); - return volume.getBucket(bukName); + String sourceBucket = bukName; + if (createLinkedBucket) { + sourceBucket = bukName + RandomStringUtils.randomNumeric(5); + } + volume.createBucket(sourceBucket, bucketArgs); + OzoneBucket ozoneBucket = volume.getBucket(sourceBucket); + if (createLinkedBucket) { + ozoneBucket = createLinkedBucket(client, vol, sourceBucket, bukName); + } + return ozoneBucket; + } + + public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, String sourceBucketName, + String linkedBucketName) throws IOException { + BucketArgs.Builder bb = new BucketArgs.Builder() + .setStorageType(StorageType.DEFAULT) + .setVersioning(false) + .setSourceVolume(vol) + .setSourceBucket(sourceBucketName); + return createBucket(client, vol, bb.build(), linkedBucketName); + } + + public static OzoneBucket createVolumeAndBucket(OzoneClient client, + BucketLayout bucketLayout) + throws IOException { + return createVolumeAndBucket(client, bucketLayout, false); } public static OzoneBucket createVolumeAndBucket(OzoneClient client, - BucketLayout bucketLayout) throws IOException { + BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException { final int attempts = 5; for (int i = 0; i < attempts; i++) { try { String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - return createVolumeAndBucket(client, volumeName, bucketName, + OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName, bucketName, bucketLayout); + if (createLinkedBucket) { + String targetBucketName = ozoneBucket.getName() + RandomStringUtils.randomNumeric(5); + ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName); + } + return ozoneBucket; } catch (OMException e) { if (e.getResult() != OMException.ResultCodes.VOLUME_ALREADY_EXISTS && e.getResult() != OMException.ResultCodes.BUCKET_ALREADY_EXISTS) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index 099c1d2e1ff..e0c2a292397 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -119,10 +119,11 @@ public final class TestDelegationToken { @TempDir private Path folder; + @TempDir + private File workDir; private MiniKdc 
miniKdc; private OzoneConfiguration conf; - private File workDir; private File scmKeytab; private File spnegoKeytab; private File omKeyTab; @@ -166,8 +167,6 @@ public void init() { conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name()); - workDir = GenericTestUtils.getTestDir(getClass().getSimpleName()); - startMiniKdc(); setSecureConfig(); createCredentialsInKDC(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 8a55dc7b7d0..2a150683001 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -139,7 +139,7 @@ private CompleteMultipartUploadRequest.Part uploadPart(String uploadID, ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(BUCKET, KEY, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -168,7 +168,7 @@ private void completeMultipartUpload( private void getObjectMultipart(int partNumber, long bytes) throws IOException, OS3Exception { Response response = - REST.get(BUCKET, KEY, partNumber, null, 100, null); + REST.get(BUCKET, KEY, partNumber, null, 100, null, null); assertEquals(200, response.getStatus()); assertEquals(bytes, response.getLength()); assertEquals("3", response.getHeaderString(MP_PARTS_COUNT)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java index cef872597e4..6b7dde3934d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone; import com.google.common.collect.ImmutableMap; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -32,14 +31,15 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.net.StaticMapping; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.util.List; @@ -61,23 +61,25 @@ @Timeout(300) public class TestOMSortDatanodes { + @TempDir + private static File dir; private static OzoneConfiguration config; private static StorageContainerManager scm; private static NodeManager nodeManager; private static KeyManagerImpl keyManager; private static StorageContainerLocationProtocol mockScmContainerClient; private static OzoneManager om; - private static File dir; private static final int NODE_COUNT = 10; private static final Map EDGE_NODES = 
ImmutableMap.of( "edge0", "/rack0", "edge1", "/rack1" ); + private static OzoneClient ozoneClient; + @BeforeAll public static void setup() throws Exception { config = new OzoneConfiguration(); - dir = GenericTestUtils.getRandomizedTestDir(); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); config.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class.getName()); @@ -109,11 +111,15 @@ public static void setup() throws Exception { = new OmTestManagers(config, scm.getBlockProtocolServer(), mockScmContainerClient); om = omTestManagers.getOzoneManager(); + ozoneClient = omTestManagers.getRpcClient(); keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); } @AfterAll public static void cleanup() throws Exception { + if (ozoneClient != null) { + ozoneClient.close(); + } if (scm != null) { scm.stop(); scm.join(); @@ -121,7 +127,6 @@ public static void cleanup() throws Exception { if (om != null) { om.stop(); } - FileUtils.deleteDirectory(dir); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index cc1f93fbc1e..1fbfc1f1f70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -93,6 +93,7 @@ private void addPropertiesNotInXml() { OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, OMConfigKeys.OZONE_FS_TRASH_CHECKPOINT_INTERVAL_KEY, OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED, + OMConfigKeys.OZONE_OM_FEATURES_DISABLED, OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE, OzoneConfigKeys.OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_KEY, OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS, @@ -125,6 +126,7 @@ private void addPropertiesNotInXml() { OMConfigKeys.OZONE_RANGER_HTTPS_ADDRESS_KEY, OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_USER, OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_PASSWD, + OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION, ScmConfigKeys.OZONE_SCM_PIPELINE_PLACEMENT_IMPL_KEY, ScmConfigKeys.OZONE_SCM_HA_PREFIX, S3GatewayConfigKeys.OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 4f41d516153..637e8bd9e4f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultApprover; @@ -100,6 +101,7 @@ import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.security.OMCertificateClient; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.SecretKeyTestClient; import 
org.apache.hadoop.security.KerberosAuthException; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.SecurityUtil; @@ -152,7 +154,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.USER_MISMATCH; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; -import org.apache.ozone.test.LambdaTestUtils; import org.apache.ozone.test.tag.Flaky; import org.apache.ozone.test.tag.Unhealthy; import org.apache.ratis.protocol.ClientId; @@ -1182,10 +1183,10 @@ public String renewAndStoreKeyAndCertificate(boolean force) throws CertificateEx } /** - * Tests delegation token renewal after a certificate renew. + * Tests delegation token renewal after a secret key rotation. */ @Test - void testDelegationTokenRenewCrossCertificateRenew() throws Exception { + void testDelegationTokenRenewCrossSecretKeyRotation() throws Exception { initSCM(); try { scm = HddsTestUtils.getScmSimple(conf); @@ -1206,11 +1207,12 @@ void testDelegationTokenRenewCrossCertificateRenew() throws Exception { CertificateClientTestImpl certClient = new CertificateClientTestImpl(newConf, true); - X509Certificate omCert = certClient.getCertificate(); - String omCertId1 = omCert.getSerialNumber().toString(); // Start OM om.setCertClient(certClient); om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); + SecretKeyTestClient secretKeyClient = new SecretKeyTestClient(); + ManagedSecretKey secretKey1 = secretKeyClient.getCurrentSecretKey(); + om.setSecretKeyClient(secretKeyClient); om.start(); GenericTestUtils.waitFor(() -> om.isLeaderReady(), 100, 10000); @@ -1231,30 +1233,26 @@ void testDelegationTokenRenewCrossCertificateRenew() throws Exception { assertEquals(SecurityUtil.buildTokenService( om.getNodeDetails().getRpcAddress()).toString(), token1.getService().toString()); - assertEquals(omCertId1, token1.decodeIdentifier().getOmCertSerialId()); + assertEquals(secretKey1.getId().toString(), token1.decodeIdentifier().getSecretKeyId()); // Renew delegation token long expiryTime = omClient.renewDelegationToken(token1); assertThat(expiryTime).isGreaterThan(0); - // Wait for OM certificate to renew - LambdaTestUtils.await(certLifetime, 100, () -> - !StringUtils.equals(token1.decodeIdentifier().getOmCertSerialId(), - omClient.getDelegationToken(new Text("om")) - .decodeIdentifier().getOmCertSerialId())); - String omCertId2 = - certClient.getCertificate().getSerialNumber().toString(); - assertNotEquals(omCertId1, omCertId2); + // Rotate secret key + secretKeyClient.rotate(); + ManagedSecretKey secretKey2 = secretKeyClient.getCurrentSecretKey(); + assertNotEquals(secretKey1.getId(), secretKey2.getId()); // Get a new delegation token Token token2 = omClient.getDelegationToken( new Text("om")); - assertEquals(omCertId2, token2.decodeIdentifier().getOmCertSerialId()); + assertEquals(secretKey2.getId().toString(), token2.decodeIdentifier().getSecretKeyId()); - // Because old certificate is still valid, so renew old token will succeed + // Because old secret key is still valid, so renew old token will succeed expiryTime = omClient.renewDelegationToken(token1); assertThat(expiryTime) .isGreaterThan(0) - .isLessThan(omCert.getNotAfter().getTime()); + .isLessThan(secretKey2.getExpiryTime().toEpochMilli()); } finally { if (scm != null) { scm.stop(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java deleted file mode 100644 index 766ed09bccd..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ /dev/null @@ -1,490 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.commons.lang3.NotImplementedException; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.DefaultReplicationConfig; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.ozone.ClientConfigForTesting; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.ECKeyOutputStream; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.TestHelper; -import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; - -/** - * Tests key output stream. 
- */ -abstract class AbstractTestECKeyOutputStream { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; - private static int dataBlocks = 3; - private static int inputSize = dataBlocks * chunkSize; - private static byte[][] inputChunks = new byte[dataBlocks][chunkSize]; - - /** - * Create a MiniDFSCluster for testing. - */ - protected static void init(boolean zeroCopyEnabled) throws Exception { - chunkSize = 1024 * 1024; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - - OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); - clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE); - clientConfig.setStreamBufferFlushDelay(false); - conf.setFromObject(clientConfig); - - // If SCM detects dead node too quickly, then container would be moved to - // closed state and all in progress writes will get exception. To avoid - // that, we are just keeping higher timeout and none of the tests depending - // on deadnode detection timeout currently. - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 60, TimeUnit.SECONDS); - conf.setTimeDuration("hdds.ratis.raft.server.rpc.slowness.timeout", 300, - TimeUnit.SECONDS); - conf.setTimeDuration( - "hdds.ratis.raft.server.notification.no-leader.timeout", 300, - TimeUnit.SECONDS); - conf.setQuietMode(false); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, - StorageUnit.MB); - conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 500, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, - TimeUnit.SECONDS); - conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED, - zeroCopyEnabled); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); - // "Enable" hsync to verify that hsync would be blocked by ECKeyOutputStream - conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); - - ClientConfigForTesting.newBuilder(StorageUnit.BYTES) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .applyTo(conf); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .build(); - cluster.waitForClusterToBeReady(); - client = OzoneClientFactory.getRpcClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "testeckeyoutputstream"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - initInputChunks(); - } - - @BeforeAll - public static void init() throws Exception { - init(false); - } - - /** - * Shutdown MiniDFSCluster. 
- */ - @AfterAll - public static void shutdown() { - IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testCreateKeyWithECReplicationConfig() throws Exception { - try (OzoneOutputStream key = TestHelper - .createKey(keyString, new ECReplicationConfig(3, 2, - ECReplicationConfig.EcCodec.RS, chunkSize), inputSize, - objectStore, volumeName, bucketName)) { - assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream()); - } - } - - @Test - public void testCreateKeyWithOutBucketDefaults() throws Exception { - OzoneVolume volume = objectStore.getVolume(volumeName); - OzoneBucket bucket = volume.getBucket(bucketName); - try (OzoneOutputStream out = bucket.createKey("myKey", inputSize)) { - assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); - for (byte[] inputChunk : inputChunks) { - out.write(inputChunk); - } - } - } - - @Test - public void testCreateKeyWithBucketDefaults() throws Exception { - String myBucket = UUID.randomUUID().toString(); - OzoneVolume volume = objectStore.getVolume(volumeName); - final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); - bucketArgs.setDefaultReplicationConfig( - new DefaultReplicationConfig( - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize))); - - volume.createBucket(myBucket, bucketArgs.build()); - OzoneBucket bucket = volume.getBucket(myBucket); - - try (OzoneOutputStream out = bucket.createKey(keyString, inputSize)) { - assertInstanceOf(ECKeyOutputStream.class, out.getOutputStream()); - for (byte[] inputChunk : inputChunks) { - out.write(inputChunk); - } - } - byte[] buf = new byte[chunkSize]; - try (OzoneInputStream in = bucket.readKey(keyString)) { - for (byte[] inputChunk : inputChunks) { - int read = in.read(buf, 0, chunkSize); - assertEquals(chunkSize, read); - assertArrayEquals(buf, inputChunk); - } - } - } - - @Test - public void testOverwriteECKeyWithRatisKey() throws Exception { - String myBucket = UUID.randomUUID().toString(); - OzoneVolume volume = objectStore.getVolume(volumeName); - final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); - volume.createBucket(myBucket, bucketArgs.build()); - OzoneBucket bucket = volume.getBucket(myBucket); - createKeyAndCheckReplicationConfig(keyString, bucket, - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize)); - - //Overwrite with RATIS/THREE - createKeyAndCheckReplicationConfig(keyString, bucket, - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); - - //Overwrite with RATIS/ONE - createKeyAndCheckReplicationConfig(keyString, bucket, - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); - } - - @Test - public void testOverwriteRatisKeyWithECKey() throws Exception { - String myBucket = UUID.randomUUID().toString(); - OzoneVolume volume = objectStore.getVolume(volumeName); - final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); - volume.createBucket(myBucket, bucketArgs.build()); - OzoneBucket bucket = volume.getBucket(myBucket); - - createKeyAndCheckReplicationConfig(keyString, bucket, - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); - // Overwrite with EC key - createKeyAndCheckReplicationConfig(keyString, bucket, - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize)); - } - - private void createKeyAndCheckReplicationConfig(String keyName, - OzoneBucket bucket, ReplicationConfig replicationConfig) - throws IOException { - try (OzoneOutputStream out = bucket - 
.createKey(keyName, inputSize, replicationConfig, new HashMap<>())) { - for (byte[] inputChunk : inputChunks) { - out.write(inputChunk); - } - } - OzoneKeyDetails key = bucket.getKey(keyName); - assertEquals(replicationConfig, key.getReplicationConfig()); - } - - @Test - public void testCreateRatisKeyAndWithECBucketDefaults() throws Exception { - OzoneBucket bucket = getOzoneBucket(); - try (OzoneOutputStream out = bucket.createKey( - "testCreateRatisKeyAndWithECBucketDefaults", 2000, - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), - new HashMap<>())) { - assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); - for (byte[] inputChunk : inputChunks) { - out.write(inputChunk); - } - } - } - - @Test - public void test13ChunksInSingleWriteOp() throws IOException { - testMultipleChunksInSingleWriteOp(13); - } - - @Test - public void testChunksInSingleWriteOpWithOffset() throws IOException { - testMultipleChunksInSingleWriteOp(11, 25, 19); - } - - @Test - public void test15ChunksInSingleWriteOp() throws IOException { - testMultipleChunksInSingleWriteOp(15); - } - - @Test - public void test20ChunksInSingleWriteOp() throws IOException { - testMultipleChunksInSingleWriteOp(20); - } - - @Test - public void test21ChunksInSingleWriteOp() throws IOException { - testMultipleChunksInSingleWriteOp(21); - } - - private void testMultipleChunksInSingleWriteOp(int offset, - int bufferChunks, int numChunks) - throws IOException { - byte[] inputData = getInputBytes(offset, bufferChunks, numChunks); - final OzoneBucket bucket = getOzoneBucket(); - String keyName = - String.format("testMultipleChunksInSingleWriteOpOffset" + - "%dBufferChunks%dNumChunks", offset, bufferChunks, - numChunks); - try (OzoneOutputStream out = bucket.createKey(keyName, 4096, - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize), new HashMap<>())) { - out.write(inputData, offset, numChunks * chunkSize); - } - - validateContent(offset, numChunks * chunkSize, inputData, bucket, - bucket.getKey(keyName)); - } - - private void testMultipleChunksInSingleWriteOp(int numChunks) - throws IOException { - testMultipleChunksInSingleWriteOp(0, numChunks, numChunks); - } - - @Test - public void testECContainerKeysCountAndNumContainerReplicas() - throws IOException, InterruptedException, TimeoutException { - byte[] inputData = getInputBytes(1); - final OzoneBucket bucket = getOzoneBucket(); - ContainerOperationClient containerOperationClient = - new ContainerOperationClient(conf); - - ECReplicationConfig repConfig = new ECReplicationConfig( - 3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); - // Close all EC pipelines so we must get a fresh pipeline and hence - // container for this test. 
- PipelineManager pm = - cluster.getStorageContainerManager().getPipelineManager(); - for (Pipeline p : pm.getPipelines(repConfig)) { - pm.closePipeline(p, true); - } - - String keyName = UUID.randomUUID().toString(); - try (OzoneOutputStream out = bucket.createKey(keyName, 4096, - repConfig, new HashMap<>())) { - out.write(inputData); - } - OzoneKeyDetails key = bucket.getKey(keyName); - long currentKeyContainerID = - key.getOzoneKeyLocations().get(0).getContainerID(); - - GenericTestUtils.waitFor(() -> { - try { - return (containerOperationClient.getContainer(currentKeyContainerID) - .getNumberOfKeys() == 1) && (containerOperationClient - .getContainerReplicas(currentKeyContainerID).size() == 5); - } catch (IOException exception) { - fail("Unexpected exception " + exception); - return false; - } - }, 100, 10000); - validateContent(inputData, bucket, key); - } - - private void validateContent(byte[] inputData, OzoneBucket bucket, - OzoneKey key) throws IOException { - validateContent(0, inputData.length, inputData, bucket, key); - } - - private void validateContent(int offset, int length, byte[] inputData, - OzoneBucket bucket, - OzoneKey key) throws IOException { - try (OzoneInputStream is = bucket.readKey(key.getName())) { - byte[] fileContent = new byte[length]; - assertEquals(length, is.read(fileContent)); - assertEquals(new String(Arrays.copyOfRange(inputData, offset, - offset + length), UTF_8), - new String(fileContent, UTF_8)); - } - } - - private OzoneBucket getOzoneBucket() throws IOException { - String myBucket = UUID.randomUUID().toString(); - OzoneVolume volume = objectStore.getVolume(volumeName); - final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); - bucketArgs.setDefaultReplicationConfig( - new DefaultReplicationConfig( - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize))); - - volume.createBucket(myBucket, bucketArgs.build()); - return volume.getBucket(myBucket); - } - - private static void initInputChunks() { - for (int i = 0; i < dataBlocks; i++) { - inputChunks[i] = getBytesWith(i + 1, chunkSize); - } - } - - private static byte[] getBytesWith(int singleDigitNumber, int total) { - StringBuilder builder = new StringBuilder(singleDigitNumber); - for (int i = 1; i <= total; i++) { - builder.append(singleDigitNumber); - } - return builder.toString().getBytes(UTF_8); - } - - @Test - public void testWriteShouldSucceedWhenDNKilled() throws Exception { - int numChunks = 3; - byte[] inputData = getInputBytes(numChunks); - final OzoneBucket bucket = getOzoneBucket(); - String keyName = "testWriteShouldSucceedWhenDNKilled" + numChunks; - DatanodeDetails nodeToKill = null; - try { - try (OzoneOutputStream out = bucket.createKey(keyName, 1024, - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize), new HashMap<>())) { - ECKeyOutputStream ecOut = (ECKeyOutputStream) out.getOutputStream(); - out.write(inputData); - // Kill a node from first pipeline - nodeToKill = ecOut.getStreamEntries() - .get(0).getPipeline().getFirstNode(); - cluster.shutdownHddsDatanode(nodeToKill); - - out.write(inputData); - - // Wait for flushing thread to finish its work. - final long checkpoint = System.currentTimeMillis(); - ecOut.insertFlushCheckpoint(checkpoint); - GenericTestUtils.waitFor(() -> ecOut.getFlushCheckpoint() == checkpoint, - 100, 10000); - - // Check the second blockGroup pipeline to make sure that the failed - // node is not selected. 
- assertThat(ecOut.getStreamEntries().get(1).getPipeline().getNodes()) - .doesNotContain(nodeToKill); - } - - try (OzoneInputStream is = bucket.readKey(keyName)) { - // We wrote "inputData" twice, so do two reads and ensure the correct - // data comes back. - for (int i = 0; i < 2; i++) { - byte[] fileContent = new byte[inputData.length]; - assertEquals(inputData.length, is.read(fileContent)); - assertEquals(new String(inputData, UTF_8), - new String(fileContent, UTF_8)); - } - } - } finally { - cluster.restartHddsDatanode(nodeToKill, true); - } - } - - private byte[] getInputBytes(int numChunks) { - return getInputBytes(0, numChunks, numChunks); - } - - private byte[] getInputBytes(int offset, int bufferChunks, int numChunks) { - byte[] inputData = new byte[offset + bufferChunks * chunkSize]; - for (int i = 0; i < numChunks; i++) { - int start = offset + (i * chunkSize); - Arrays.fill(inputData, start, start + chunkSize - 1, - String.valueOf(i % 9).getBytes(UTF_8)[0]); - } - return inputData; - } - - @Test - public void testBlockedHflushAndHsync() throws Exception { - // Expect ECKeyOutputStream hflush and hsync calls to throw exception - try (OzoneOutputStream oOut = TestHelper.createKey( - keyString, new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize), - inputSize, objectStore, volumeName, bucketName)) { - assertInstanceOf(ECKeyOutputStream.class, oOut.getOutputStream()); - KeyOutputStream kOut = (KeyOutputStream) oOut.getOutputStream(); - - assertThrows(NotImplementedException.class, () -> kOut.hflush()); - assertThrows(NotImplementedException.class, () -> kOut.hsync()); - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index eb9f35f518c..7e518687bea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -24,6 +24,7 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.file.Path; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; @@ -32,12 +33,14 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.CountDownLatch; @@ -88,7 +91,6 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneKeyDetails; @@ -170,6 +172,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; +import static 
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; @@ -188,10 +192,12 @@ import static org.junit.jupiter.api.Assertions.fail; import static org.slf4j.event.Level.DEBUG; +import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; import org.junit.jupiter.params.provider.EnumSource; @@ -221,6 +227,7 @@ abstract class OzoneRpcClientTests extends OzoneTestBase { private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, remoteGroupName, ACCESS, READ); private static MessageDigest eTagProvider; + private static Set ozoneClients = new HashSet<>(); @BeforeAll public static void initialize() throws NoSuchAlgorithmException { @@ -250,6 +257,7 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); + ozoneClients.add(ozClient); store = ozClient.getObjectStore(); storageContainerLocationClient = cluster.getStorageContainerLocationClient(); @@ -259,10 +267,9 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build /** * Close OzoneClient and shutdown MiniOzoneCluster. */ - static void shutdownCluster() throws IOException { - if (ozClient != null) { - ozClient.close(); - } + static void shutdownCluster() { + org.apache.hadoop.hdds.utils.IOUtils.closeQuietly(ozoneClients); + ozoneClients.clear(); if (storageContainerLocationClient != null) { storageContainerLocationClient.close(); @@ -274,6 +281,7 @@ static void shutdownCluster() throws IOException { } private static void setOzClient(OzoneClient ozClient) { + ozoneClients.add(ozClient); OzoneRpcClientTests.ozClient = ozClient; } @@ -388,10 +396,10 @@ public void testBucketSetOwner() throws IOException { .setVolumeName(volumeName).setBucketName(bucketName) .setStoreType(OzoneObj.StoreType.OZONE) .setResType(OzoneObj.ResourceType.BUCKET).build(); - store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(volumeObj, new OzoneAcl(USER, "user2", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user2", ACCESS, ACLType.ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user2", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user2", ACCESS, ALL)); createKeyForUser(volumeName, bucketName, key1, content, user1); createKeyForUser(volumeName, bucketName, key2, content, user2); @@ -619,7 +627,7 @@ public void testDeleteVolume() @Test public void testCreateVolumeWithMetadata() - throws IOException, OzoneClientException { + throws IOException { String volumeName = UUID.randomUUID().toString(); VolumeArgs volumeArgs = VolumeArgs.newBuilder() .addMetadata("key1", "val1") @@ -634,7 +642,7 @@ public void testCreateVolumeWithMetadata() @Test public void testCreateBucketWithMetadata() - 
throws IOException, OzoneClientException { + throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -773,7 +781,7 @@ public void testCreateBucketWithAllArgument() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); ReplicationConfig repConfig = new ECReplicationConfig(3, 2); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); @@ -812,7 +820,7 @@ public void testAddBucketAcl() OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); List acls = new ArrayList<>(); - acls.add(new OzoneAcl(USER, "test", ACCESS, ACLType.ALL)); + acls.add(new OzoneAcl(USER, "test", ACCESS, ALL)); OzoneBucket bucket = volume.getBucket(bucketName); for (OzoneAcl acl : acls) { assertTrue(bucket.addAcl(acl)); @@ -828,7 +836,7 @@ public void testRemoveBucketAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder() @@ -847,9 +855,9 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); OzoneAcl acl2 = new OzoneAcl(USER, "test1", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder() @@ -907,6 +915,64 @@ public void testAclsAfterCallingSetBucketProperty() throws Exception { } + @Test + public void testAclDeDuplication() + throws IOException { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OzoneAcl userAcl1 = new OzoneAcl(USER, "test", DEFAULT, READ); + UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); + OzoneAcl currentUserAcl = new OzoneAcl(USER, currentUser.getShortUserName(), ACCESS, ALL); + OzoneAcl currentUserPrimaryGroupAcl = new OzoneAcl(GROUP, currentUser.getPrimaryGroupName(), ACCESS, READ, LIST); + VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() + .setOwner(currentUser.getShortUserName()) + .setAdmin(currentUser.getShortUserName()) + .addAcl(userAcl1) + .addAcl(currentUserAcl) + .addAcl(currentUserPrimaryGroupAcl) + .build(); + + store.createVolume(volumeName, createVolumeArgs); + OzoneVolume volume = store.getVolume(volumeName); + List volumeAcls = volume.getAcls(); + assertEquals(3, volumeAcls.size()); + assertTrue(volumeAcls.contains(userAcl1)); + assertTrue(volumeAcls.contains(currentUserAcl)); + assertTrue(volumeAcls.contains(currentUserPrimaryGroupAcl)); + + // normal bucket + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(currentUserAcl).addAcl(currentUserPrimaryGroupAcl); + volume.createBucket(bucketName, builder.build()); + OzoneBucket bucket = volume.getBucket(bucketName); + List bucketAcls = bucket.getAcls(); + assertEquals(bucketName, bucket.getName()); + assertEquals(3, bucketAcls.size()); + assertTrue(bucketAcls.contains(currentUserAcl)); + assertTrue(bucketAcls.contains(currentUserPrimaryGroupAcl)); + 
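// userAcl1 was added with DEFAULT scope on the volume, so the bucket is expected to inherit it with ACCESS scope, as the following assertions check: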
assertTrue(bucketAcls.get(2).getName().equals(userAcl1.getName())); + assertTrue(bucketAcls.get(2).getAclList().equals(userAcl1.getAclList())); + assertTrue(bucketAcls.get(2).getAclScope().equals(ACCESS)); + + // link bucket + OzoneAcl userAcl2 = new OzoneAcl(USER, "test-link", DEFAULT, READ); + String linkBucketName = "link-" + bucketName; + builder = BucketArgs.newBuilder().setSourceVolume(volumeName).setSourceBucket(bucketName) + .addAcl(currentUserAcl).addAcl(currentUserPrimaryGroupAcl).addAcl(userAcl2); + volume.createBucket(linkBucketName, builder.build()); + OzoneBucket linkBucket = volume.getBucket(linkBucketName); + List linkBucketAcls = linkBucket.getAcls(); + assertEquals(linkBucketName, linkBucket.getName()); + assertEquals(5, linkBucketAcls.size()); + assertTrue(linkBucketAcls.contains(currentUserAcl)); + assertTrue(linkBucketAcls.contains(currentUserPrimaryGroupAcl)); + assertTrue(linkBucketAcls.contains(userAcl2)); + assertTrue(linkBucketAcls.contains(OzoneAcl.LINK_BUCKET_DEFAULT_ACL)); + assertTrue(linkBucketAcls.get(4).getName().equals(userAcl1.getName())); + assertTrue(linkBucketAcls.get(4).getAclList().equals(userAcl1.getAclList())); + assertTrue(linkBucketAcls.get(4).getAclScope().equals(ACCESS)); + } + @Test public void testSetBucketStorageType() throws IOException { @@ -3023,10 +3089,10 @@ public void testMultipartUploadWithACL() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Add ACL on Bucket - OzoneAcl acl1 = new OzoneAcl(USER, "Monday", DEFAULT, ACLType.ALL); - OzoneAcl acl2 = new OzoneAcl(USER, "Friday", DEFAULT, ACLType.ALL); - OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACCESS, ACLType.ALL); - OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACCESS, ACLType.ALL); + OzoneAcl acl1 = new OzoneAcl(USER, "Monday", DEFAULT, ALL); + OzoneAcl acl2 = new OzoneAcl(USER, "Friday", DEFAULT, ALL); + OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACCESS, ALL); + OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACCESS, ALL); bucket.addAcl(acl1); bucket.addAcl(acl2); bucket.addAcl(acl3); @@ -3140,6 +3206,37 @@ void testMultipartUploadOverride(ReplicationConfig replication) doMultipartUpload(bucket, keyName, (byte)97, replication); } + + /** + * This test prints out that there is a memory leak in the test logs + * which during post-processing is caught by the CI thereby failing the + * CI run. Hence, disabling this for CI. 
+ */ + @Unhealthy + public void testClientLeakDetector() throws Exception { + OzoneClient client = OzoneClientFactory.getRpcClient(cluster.getConf()); + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + GenericTestUtils.LogCapturer ozoneClientFactoryLogCapturer = + GenericTestUtils.LogCapturer.captureLogs( + OzoneClientFactory.getLogger()); + + client.getObjectStore().createVolume(volumeName); + OzoneVolume volume = client.getObjectStore().getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + byte[] data = new byte[10]; + Arrays.fill(data, (byte) 1); + try (OzoneOutputStream out = bucket.createKey(keyName, 10, + ReplicationConfig.fromTypeAndFactor(RATIS, ONE), new HashMap<>())) { + out.write(data); + } + client = null; + System.gc(); + GenericTestUtils.waitFor(() -> ozoneClientFactoryLogCapturer.getOutput() + .contains("is not closed properly"), 100, 2000); + } @Test public void testMultipartUploadOwner() throws Exception { // Save the old user, and switch to the old user after test @@ -3166,10 +3263,10 @@ public void testMultipartUploadOwner() throws Exception { .setVolumeName(volumeName).setBucketName(bucketName) .setStoreType(OzoneObj.StoreType.OZONE) .setResType(OzoneObj.ResourceType.BUCKET).build(); - store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(volumeObj, new OzoneAcl(USER, "awsUser1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "awsUser1", ACCESS, ACLType.ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "awsUser1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "awsUser1", ACCESS, ALL)); // user1 MultipartUpload a key UserGroupInformation.setLoginUser(user1); @@ -3906,7 +4003,7 @@ public void testNativeAclsForPrefix() throws Exception { aclsGet = store.getAcl(prefixObj); assertEquals(0, aclsGet.size()); - OzoneAcl group1Acl = new OzoneAcl(GROUP, "group1", ACCESS, ACLType.ALL); + OzoneAcl group1Acl = new OzoneAcl(GROUP, "group1", ACCESS, ALL); List acls = new ArrayList<>(); acls.add(user1Acl); acls.add(group1Acl); @@ -3943,14 +4040,12 @@ private List getAclList(OzoneConfiguration conf) //User ACL UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); - ACLType groupRights = aclConfig.getGroupDefaultRights(); - - listOfAcls.add(new OzoneAcl(USER, ugi.getUserName(), ACCESS, userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(ugi.getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, ACCESS, groupRights))); + ACLType[] userRights = aclConfig.getUserDefaultRights(); + ACLType[] groupRights = aclConfig.getGroupDefaultRights(); + + listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userRights)); + //Group ACL of the User + listOfAcls.add(new OzoneAcl(GROUP, ugi.getPrimaryGroupName(), ACCESS, groupRights)); return listOfAcls; } @@ -4019,7 +4114,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { OzoneAcl ua = new OzoneAcl(USER, "userx", ACCESS, ACLType.READ_ACL); OzoneAcl ug = new OzoneAcl(GROUP, 
"userx", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.setAcl(ozObj, Arrays.asList(ua, ug)); newAcls = store.getAcl(ozObj); assertEquals(2, newAcls.size()); @@ -4774,17 +4869,13 @@ void testMultiPartUploadWithStream(ReplicationConfig replicationConfig) } @Test - public void testUploadWithStreamAndMemoryMappedBuffer() throws IOException { - // create a local dir - final String dir = GenericTestUtils.getTempPath( - getClass().getSimpleName()); - GenericTestUtils.assertDirCreation(new File(dir)); + public void testUploadWithStreamAndMemoryMappedBuffer(@TempDir Path dir) throws IOException { // create a local file final int chunkSize = 1024; final byte[] data = new byte[8 * chunkSize]; ThreadLocalRandom.current().nextBytes(data); - final File file = new File(dir, "data"); + final File file = new File(dir.toString(), "data"); try (FileOutputStream out = new FileOutputStream(file)) { out.write(data); } @@ -4955,4 +5046,136 @@ public void reset() throws IOException { init(); } } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testPutObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, anyReplication(), new HashMap<>()); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertTrue(key.getTags().isEmpty()); + + Map tags = new HashMap<>(); + tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + bucket.putObjectTagging(keyName, tags); + + OzoneKey updatedKey = bucket.getKey(keyName); + assertEquals(tags.size(), updatedKey.getTags().size()); + assertEquals(key.getModificationTime(), updatedKey.getModificationTime()); + assertThat(updatedKey.getTags()).containsAllEntriesOf(tags); + + // Do another putObjectTagging, it should override the previous one + Map secondTags = new HashMap<>(); + secondTags.put("tag-key-3", "tag-value-3"); + + bucket.putObjectTagging(keyName, secondTags); + + updatedKey = bucket.getKey(keyName); + assertEquals(secondTags.size(), updatedKey.getTags().size()); + assertThat(updatedKey.getTags()).containsAllEntriesOf(secondTags); + assertThat(updatedKey.getTags()).doesNotContainKeys("tag-key-1", "tag-key-2"); + + if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + String dirKey = "dir1/"; + bucket.createDirectory(dirKey); + OMException exception = assertThrows(OMException.class, + () -> bucket.putObjectTagging(dirKey, tags)); + assertThat(exception.getResult()).isEqualTo(ResultCodes.NOT_SUPPORTED_OPERATION); + } + } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testDeleteObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket 
bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + Map tags = new HashMap<>(); + tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, anyReplication(), new HashMap<>(), tags); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertFalse(key.getTags().isEmpty()); + + bucket.deleteObjectTagging(keyName); + + OzoneKey updatedKey = bucket.getKey(keyName); + assertEquals(0, updatedKey.getTags().size()); + assertEquals(key.getModificationTime(), updatedKey.getModificationTime()); + + if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + String dirKey = "dir1/"; + bucket.createDirectory(dirKey); + OMException exception = assertThrows(OMException.class, + () -> bucket.deleteObjectTagging(dirKey)); + assertThat(exception.getResult()).isEqualTo(ResultCodes.NOT_SUPPORTED_OPERATION); + } + } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testGetObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + Map tags = new HashMap<>(); + tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, anyReplication(), new HashMap<>(), tags); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertEquals(tags.size(), key.getTags().size()); + + Map tagsRetrieved = bucket.getObjectTagging(keyName); + + assertEquals(tags.size(), tagsRetrieved.size()); + assertThat(tagsRetrieved).containsAllEntriesOf(tags); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index d668bb4b652..c66ca2931bd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -32,13 +32,11 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.io.File; import java.io.IOException; import java.util.HashMap; import java.util.List; @@ -75,10 +73,6 @@ public class TestBCSID { */ @BeforeAll public static void init() throws Exception { - String path = GenericTestUtils - .getTempPath(TestBCSID.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index 8810bab5190..63692c0dfc7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream; import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -96,6 +97,9 @@ static MiniOzoneCluster createCluster() throws IOException, conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 3); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -271,6 +275,7 @@ void testWriteLessThanChunkSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteExactlyFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -478,6 +483,7 @@ void testWriteMoreThanChunkSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteMoreThanFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -568,6 +574,7 @@ void testWriteMoreThanFlushSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -662,7 +669,7 @@ void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking @ParameterizedTest @MethodSource("clientParameters") - @Flaky("HDDS-11325") + @Flaky("HDDS-11564") void testWriteMoreThanMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index f823add57bd..010bd93834b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -93,7 +93,6 @@ private static Stream clientParameters() { @ParameterizedTest @MethodSource("clientParameters") - @Flaky("HDDS-11325") void testContainerClose(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -273,6 +272,7 @@ void testWatchForCommitDatanodeFailure(boolean flushDelay, boolean enablePiggyba @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11849") void test2DatanodesFailure(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -386,7 +386,8 @@ private void testWriteMoreThanMaxFlushSize(OzoneClient client) assertInstanceOf(RatisBlockOutputStream.class, keyOutputStream.getStreamEntries().get(0).getOutputStream()); - assertEquals(4, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(4); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(400, blockOutputStream.getTotalDataFlushedLength()); @@ -442,7 +443,8 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception { assertInstanceOf(RatisBlockOutputStream.class, keyOutputStream.getStreamEntries().get(0).getOutputStream()); - assertEquals(2, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(2); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(0, blockOutputStream.getTotalDataFlushedLength()); @@ -455,7 +457,8 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception { // Since the data in the buffer is already flushed, flush here will have // no impact on the counters and data structures - assertEquals(2, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(2); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); @@ -506,9 +509,10 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) keyOutputStream.getStreamEntries().get(0).getOutputStream()); // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 4 buffers allocated worth of chunk size + // buffer pool will have up to 4 buffers allocated worth of chunk size - assertEquals(4, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(4); // writtenDataLength as well flushedDataLength will be updated here assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); @@ -531,7 +535,8 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) // Since the data in the buffer is already flushed, flush here will have // no impact on the counters and data structures - assertEquals(4, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(4); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(dataLength, 
blockOutputStream.getTotalDataFlushedLength()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index 78a4e78647e..8d894471289 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -49,7 +49,6 @@ import org.junit.jupiter.api.Test; import org.slf4j.LoggerFactory; -import java.io.File; import java.io.IOException; import java.time.Duration; import java.util.HashMap; @@ -80,7 +79,6 @@ public class TestContainerReplicationEndToEnd { private static ObjectStore objectStore; private static String volumeName; private static String bucketName; - private static String path; private static XceiverClientManager xceiverClientManager; private static long containerReportInterval; @@ -92,10 +90,7 @@ public class TestContainerReplicationEndToEnd { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); + containerReportInterval = 2000; conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 3f1c31edfe7..dc00b0acc55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -47,7 +47,6 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.RatisServerConfiguration; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; @@ -78,7 +77,6 @@ public class TestContainerStateMachine { private ObjectStore objectStore; private String volumeName; private String bucketName; - private String path; /** * Create a MiniDFSCluster for testing. 
@@ -87,10 +85,6 @@ public class TestContainerStateMachine { */ @BeforeEach public void setup() throws Exception { - path = GenericTestUtils - .getTempPath(TestContainerStateMachine.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index eea068a8742..f351ad8927a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; @@ -80,10 +79,6 @@ public class TestContainerStateMachineFailureOnRead { @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); - String path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index b6eaca8e80d..e3759521c82 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -19,6 +19,7 @@ import java.io.File; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.time.Duration; @@ -32,6 +33,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsUtils; @@ -40,6 +42,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; @@ -50,6 +53,7 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.utils.IOUtils; import 
org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -264,6 +268,57 @@ public void testContainerStateMachineCloseOnMissingPipeline() key.close(); } + + @Test + public void testContainerStateMachineRestartWithDNChangePipeline() + throws Exception { + try (OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("testDNRestart", 1024, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>())) { + key.write("ratis".getBytes(UTF_8)); + key.flush(); + + KeyOutputStream groupOutputStream = (KeyOutputStream) key. + getOutputStream(); + List locationInfoList = + groupOutputStream.getLocationInfoList(); + assertEquals(1, locationInfoList.size()); + + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + Pipeline pipeline = omKeyLocationInfo.getPipeline(); + List datanodes = + new ArrayList<>(TestHelper.getDatanodeServices(cluster, + pipeline)); + + DatanodeDetails dn = datanodes.get(0).getDatanodeDetails(); + + // Delete all data volumes. + cluster.getHddsDatanode(dn).getDatanodeStateMachine().getContainer().getVolumeSet().getVolumesList() + .stream().forEach(v -> { + try { + FileUtils.deleteDirectory(v.getStorageDir()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + // Delete datanode.id datanodeIdFile. + File datanodeIdFile = new File(HddsServerUtil.getDatanodeIdFilePath(cluster.getHddsDatanode(dn).getConf())); + boolean deleted = datanodeIdFile.delete(); + assertTrue(deleted); + cluster.restartHddsDatanode(dn, false); + GenericTestUtils.waitFor(() -> { + try { + key.write("ratis".getBytes(UTF_8)); + key.flush(); + return groupOutputStream.getLocationInfoList().size() > 1; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }, 1000, 30000); + } + } + @Test public void testContainerStateMachineFailures() throws Exception { OzoneOutputStream key = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index 229059d84ad..b59d4885d71 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -72,7 +71,6 @@ public class TestContainerStateMachineFlushDelay { private ObjectStore objectStore; private String volumeName; private String bucketName; - private String path; private int chunkSize; private int flushSize; private int maxFlushSize; @@ -91,10 +89,6 @@ public void setup() throws Exception { maxFlushSize = 2 * flushSize; blockSize = 2 * maxFlushSize; keyString = UUID.randomUUID().toString(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFlushDelay.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index d4ff8573627..bb42d8a0f57 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client.rpc; -import java.io.File; import java.io.IOException; import java.time.Duration; import java.util.HashMap; @@ -60,7 +59,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; @@ -91,7 +89,6 @@ public class TestDeleteWithInAdequateDN { private static ObjectStore objectStore; private static String volumeName; private static String bucketName; - private static String path; private static XceiverClientManager xceiverClientManager; private static final int FACTOR_THREE_PIPELINE_COUNT = 1; @@ -105,10 +102,6 @@ public static void init() throws Exception { final int numOfDatanodes = 3; conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java index c5147ecfb01..bd1fbe7382a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java @@ -17,15 +17,575 @@ package org.apache.hadoop.ozone.client.rpc; +import org.apache.commons.lang3.NotImplementedException; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import 
org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.ECKeyOutputStream; +import org.apache.hadoop.ozone.client.io.KeyOutputStream; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.TestHelper; +import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Unhealthy; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.mockito.MockedStatic; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; /** - * Tests key output stream without zero-copy enabled. + * Tests key output stream. */ -public class TestECKeyOutputStream extends - AbstractTestECKeyOutputStream { +public class TestECKeyOutputStream { + private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf = new OzoneConfiguration(); + private static OzoneClient client; + private static ObjectStore objectStore; + private static int chunkSize; + private static int flushSize; + private static int maxFlushSize; + private static int blockSize; + private static String volumeName; + private static String bucketName; + private static String keyString; + private static int dataBlocks = 3; + private static int inputSize = dataBlocks * chunkSize; + private static byte[][] inputChunks = new byte[dataBlocks][chunkSize]; + + private static void initConf(OzoneConfiguration configuration) { + OzoneClientConfig clientConfig = configuration.getObject(OzoneClientConfig.class); + clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + configuration.setFromObject(clientConfig); + + // If SCM detects dead node too quickly, then container would be moved to + // closed state and all in progress writes will get exception. 
To avoid + // that, we are just keeping higher timeout and none of the tests depending + // on deadnode detection timeout currently. + configuration.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); + configuration.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 60, TimeUnit.SECONDS); + configuration.setTimeDuration("hdds.ratis.raft.server.rpc.slowness.timeout", 300, + TimeUnit.SECONDS); + configuration.set("ozone.replication.allowed-configs", "(^((STANDALONE|RATIS)/(ONE|THREE))|(EC/(3-2|6-3|10-4)-" + + "(512|1024|2048|4096|1)k)$)"); + configuration.setTimeDuration( + "hdds.ratis.raft.server.notification.no-leader.timeout", 300, + TimeUnit.SECONDS); + configuration.setQuietMode(false); + configuration.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, + StorageUnit.MB); + configuration.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 500, + TimeUnit.MILLISECONDS); + configuration.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, + TimeUnit.SECONDS); + configuration.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); + // "Enable" hsync to verify that hsync would be blocked by ECKeyOutputStream + configuration.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + configuration.setBoolean("ozone.client.hbase.enhancements.allowed", true); + configuration.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) + .setStreamBufferMaxSize(maxFlushSize) + .applyTo(configuration); + } + + /** + * Create a MiniDFSCluster for testing. + */ @BeforeAll - public static void init() throws Exception { - init(false); + protected static void init() throws Exception { + chunkSize = 1024 * 1024; + flushSize = 2 * chunkSize; + maxFlushSize = 2 * flushSize; + blockSize = 2 * maxFlushSize; + initConf(conf); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) + .build(); + cluster.waitForClusterToBeReady(); + client = OzoneClientFactory.getRpcClient(conf); + objectStore = client.getObjectStore(); + keyString = UUID.randomUUID().toString(); + volumeName = "testeckeyoutputstream"; + bucketName = volumeName; + objectStore.createVolume(volumeName); + objectStore.getVolume(volumeName).createBucket(bucketName); + initInputChunks(); + } + + /** + * Shutdown MiniDFSCluster. 
+ */ + @AfterAll + public static void shutdown() { + IOUtils.closeQuietly(client); + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testCreateKeyWithECReplicationConfig() throws Exception { + try (OzoneOutputStream key = TestHelper + .createKey(keyString, new ECReplicationConfig(3, 2, + ECReplicationConfig.EcCodec.RS, chunkSize), inputSize, + objectStore, volumeName, bucketName)) { + assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream()); + } + } + + @Test + @Unhealthy("HDDS-11821") + public void testECKeyCreatetWithDatanodeIdChange() + throws Exception { + AtomicReference failed = new AtomicReference<>(false); + AtomicReference miniOzoneCluster = new AtomicReference<>(); + OzoneClient client1 = null; + try (MockedStatic mockedHandler = Mockito.mockStatic(Handler.class, Mockito.CALLS_REAL_METHODS)) { + Map handlers = new HashMap<>(); + mockedHandler.when(() -> Handler + .getHandlerForContainerType(any(), any(), any(), any(), any(), any(), any(), any())) + .thenAnswer(i -> { + Handler handler = Mockito.spy((Handler) i.callRealMethod()); + handlers.put(handler.getDatanodeId(), handler); + return handler; + }); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + initConf(ozoneConfiguration); + miniOzoneCluster.set(MiniOzoneCluster.newBuilder(ozoneConfiguration).setNumDatanodes(10).build()); + miniOzoneCluster.get().waitForClusterToBeReady(); + client1 = miniOzoneCluster.get().newClient(); + ObjectStore store = client1.getObjectStore(); + store.createVolume(volumeName); + store.getVolume(volumeName).createBucket(bucketName); + OzoneOutputStream key = TestHelper.createKey(keyString, new ECReplicationConfig(3, 2, + ECReplicationConfig.EcCodec.RS, 1024), inputSize, store, volumeName, bucketName); + byte[] b = new byte[6 * 1024]; + ECKeyOutputStream groupOutputStream = (ECKeyOutputStream) key.getOutputStream(); + List locationInfoList = groupOutputStream.getLocationInfoList(); + while (locationInfoList.isEmpty()) { + locationInfoList = groupOutputStream.getLocationInfoList(); + Random random = new Random(); + random.nextBytes(b); + assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream()); + key.write(b); + key.flush(); + } + + assertEquals(1, locationInfoList.size()); + + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + long containerId = omKeyLocationInfo.getContainerID(); + Pipeline pipeline = omKeyLocationInfo.getPipeline(); + DatanodeDetails dnWithReplicaIndex1 = + pipeline.getReplicaIndexes().entrySet().stream().filter(e -> e.getValue() == 1).map(Map.Entry::getKey) + .findFirst().get(); + Mockito.when(handlers.get(dnWithReplicaIndex1.getUuidString()).getDatanodeId()) + .thenAnswer(i -> { + if (!failed.get()) { + // Change dnId for one write chunk request. 
+ failed.set(true); + return dnWithReplicaIndex1.getUuidString() + "_failed"; + } else { + return dnWithReplicaIndex1.getUuidString(); + } + }); + locationInfoList = groupOutputStream.getLocationInfoList(); + while (locationInfoList.size() == 1) { + locationInfoList = groupOutputStream.getLocationInfoList(); + Random random = new Random(); + random.nextBytes(b); + assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream()); + key.write(b); + key.flush(); + } + assertEquals(2, locationInfoList.size()); + assertNotEquals(locationInfoList.get(1).getPipeline().getId(), pipeline.getId()); + GenericTestUtils.waitFor(() -> { + try { + return miniOzoneCluster.get().getStorageContainerManager().getContainerManager() + .getContainer(ContainerID.valueOf(containerId)).getState().equals( + HddsProtos.LifeCycleState.CLOSED); + } catch (ContainerNotFoundException e) { + throw new RuntimeException(e); + } + }, 1000, 30000); + key.close(); + Assertions.assertTrue(failed.get()); + } finally { + IOUtils.closeQuietly(client1); + if (miniOzoneCluster.get() != null) { + miniOzoneCluster.get().shutdown(); + } + } } + + @Test + public void testCreateKeyWithOutBucketDefaults() throws Exception { + OzoneVolume volume = objectStore.getVolume(volumeName); + OzoneBucket bucket = volume.getBucket(bucketName); + try (OzoneOutputStream out = bucket.createKey("myKey", inputSize)) { + assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); + } + } + } + + @Test + public void testCreateKeyWithBucketDefaults() throws Exception { + String myBucket = UUID.randomUUID().toString(); + OzoneVolume volume = objectStore.getVolume(volumeName); + final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); + bucketArgs.setDefaultReplicationConfig( + new DefaultReplicationConfig( + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize))); + + volume.createBucket(myBucket, bucketArgs.build()); + OzoneBucket bucket = volume.getBucket(myBucket); + + try (OzoneOutputStream out = bucket.createKey(keyString, inputSize)) { + assertInstanceOf(ECKeyOutputStream.class, out.getOutputStream()); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); + } + } + byte[] buf = new byte[chunkSize]; + try (OzoneInputStream in = bucket.readKey(keyString)) { + for (byte[] inputChunk : inputChunks) { + int read = in.read(buf, 0, chunkSize); + assertEquals(chunkSize, read); + assertArrayEquals(buf, inputChunk); + } + } + } + + @Test + public void testOverwriteECKeyWithRatisKey() throws Exception { + String myBucket = UUID.randomUUID().toString(); + OzoneVolume volume = objectStore.getVolume(volumeName); + final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); + volume.createBucket(myBucket, bucketArgs.build()); + OzoneBucket bucket = volume.getBucket(myBucket); + createKeyAndCheckReplicationConfig(keyString, bucket, + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize)); + + //Overwrite with RATIS/THREE + createKeyAndCheckReplicationConfig(keyString, bucket, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + + //Overwrite with RATIS/ONE + createKeyAndCheckReplicationConfig(keyString, bucket, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); + } + + @Test + public void testOverwriteRatisKeyWithECKey() throws Exception { + String myBucket = UUID.randomUUID().toString(); + OzoneVolume volume = objectStore.getVolume(volumeName); + final BucketArgs.Builder bucketArgs = 
BucketArgs.newBuilder(); + volume.createBucket(myBucket, bucketArgs.build()); + OzoneBucket bucket = volume.getBucket(myBucket); + + createKeyAndCheckReplicationConfig(keyString, bucket, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + // Overwrite with EC key + createKeyAndCheckReplicationConfig(keyString, bucket, + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize)); + } + + private void createKeyAndCheckReplicationConfig(String keyName, + OzoneBucket bucket, ReplicationConfig replicationConfig) + throws IOException { + try (OzoneOutputStream out = bucket + .createKey(keyName, inputSize, replicationConfig, new HashMap<>())) { + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); + } + } + OzoneKeyDetails key = bucket.getKey(keyName); + assertEquals(replicationConfig, key.getReplicationConfig()); + } + + @Test + public void testCreateRatisKeyAndWithECBucketDefaults() throws Exception { + OzoneBucket bucket = getOzoneBucket(); + try (OzoneOutputStream out = bucket.createKey( + "testCreateRatisKeyAndWithECBucketDefaults", 2000, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + new HashMap<>())) { + assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); + } + } + } + + @Test + public void test13ChunksInSingleWriteOp() throws IOException { + testMultipleChunksInSingleWriteOp(13); + } + + @Test + public void testChunksInSingleWriteOpWithOffset() throws IOException { + testMultipleChunksInSingleWriteOp(11, 25, 19); + } + + @Test + public void test15ChunksInSingleWriteOp() throws IOException { + testMultipleChunksInSingleWriteOp(15); + } + + @Test + public void test20ChunksInSingleWriteOp() throws IOException { + testMultipleChunksInSingleWriteOp(20); + } + + @Test + public void test21ChunksInSingleWriteOp() throws IOException { + testMultipleChunksInSingleWriteOp(21); + } + + private void testMultipleChunksInSingleWriteOp(int offset, + int bufferChunks, int numChunks) + throws IOException { + byte[] inputData = getInputBytes(offset, bufferChunks, numChunks); + final OzoneBucket bucket = getOzoneBucket(); + String keyName = + String.format("testMultipleChunksInSingleWriteOpOffset" + + "%dBufferChunks%dNumChunks", offset, bufferChunks, + numChunks); + try (OzoneOutputStream out = bucket.createKey(keyName, 4096, + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize), new HashMap<>())) { + out.write(inputData, offset, numChunks * chunkSize); + } + + validateContent(offset, numChunks * chunkSize, inputData, bucket, + bucket.getKey(keyName)); + } + + private void testMultipleChunksInSingleWriteOp(int numChunks) + throws IOException { + testMultipleChunksInSingleWriteOp(0, numChunks, numChunks); + } + + @Test + public void testECContainerKeysCountAndNumContainerReplicas() + throws IOException, InterruptedException, TimeoutException { + byte[] inputData = getInputBytes(1); + final OzoneBucket bucket = getOzoneBucket(); + ContainerOperationClient containerOperationClient = + new ContainerOperationClient(conf); + + ECReplicationConfig repConfig = new ECReplicationConfig( + 3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); + // Close all EC pipelines so we must get a fresh pipeline and hence + // container for this test. 
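+ // A minimal sketch of the replica arithmetic behind the waitFor further below,
+ // assuming ECReplicationConfig exposes getData() and getParity(): an RS EC(3,2)
+ // key writes each block group across data + parity nodes, so the freshly
+ // allocated container is expected to report 3 + 2 = 5 replicas.
+ final int expectedReplicaCount = repConfig.getData() + repConfig.getParity(); // 5 for EC(3,2)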
+ PipelineManager pm = + cluster.getStorageContainerManager().getPipelineManager(); + for (Pipeline p : pm.getPipelines(repConfig)) { + pm.closePipeline(p, true); + } + + String keyName = UUID.randomUUID().toString(); + try (OzoneOutputStream out = bucket.createKey(keyName, 4096, + repConfig, new HashMap<>())) { + out.write(inputData); + } + OzoneKeyDetails key = bucket.getKey(keyName); + long currentKeyContainerID = + key.getOzoneKeyLocations().get(0).getContainerID(); + + GenericTestUtils.waitFor(() -> { + try { + return (containerOperationClient.getContainer(currentKeyContainerID) + .getNumberOfKeys() == 1) && (containerOperationClient + .getContainerReplicas(currentKeyContainerID).size() == 5); + } catch (IOException exception) { + fail("Unexpected exception " + exception); + return false; + } + }, 100, 10000); + validateContent(inputData, bucket, key); + } + + private void validateContent(byte[] inputData, OzoneBucket bucket, + OzoneKey key) throws IOException { + validateContent(0, inputData.length, inputData, bucket, key); + } + + private void validateContent(int offset, int length, byte[] inputData, + OzoneBucket bucket, + OzoneKey key) throws IOException { + try (OzoneInputStream is = bucket.readKey(key.getName())) { + byte[] fileContent = new byte[length]; + assertEquals(length, is.read(fileContent)); + assertEquals(new String(Arrays.copyOfRange(inputData, offset, + offset + length), UTF_8), + new String(fileContent, UTF_8)); + } + } + + private OzoneBucket getOzoneBucket() throws IOException { + String myBucket = UUID.randomUUID().toString(); + OzoneVolume volume = objectStore.getVolume(volumeName); + final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); + bucketArgs.setDefaultReplicationConfig( + new DefaultReplicationConfig( + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize))); + + volume.createBucket(myBucket, bucketArgs.build()); + return volume.getBucket(myBucket); + } + + private static void initInputChunks() { + for (int i = 0; i < dataBlocks; i++) { + inputChunks[i] = getBytesWith(i + 1, chunkSize); + } + } + + private static byte[] getBytesWith(int singleDigitNumber, int total) { + StringBuilder builder = new StringBuilder(singleDigitNumber); + for (int i = 1; i <= total; i++) { + builder.append(singleDigitNumber); + } + return builder.toString().getBytes(UTF_8); + } + + @Test + public void testWriteShouldSucceedWhenDNKilled() throws Exception { + int numChunks = 3; + byte[] inputData = getInputBytes(numChunks); + final OzoneBucket bucket = getOzoneBucket(); + String keyName = "testWriteShouldSucceedWhenDNKilled" + numChunks; + DatanodeDetails nodeToKill = null; + try { + try (OzoneOutputStream out = bucket.createKey(keyName, 1024, + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize), new HashMap<>())) { + ECKeyOutputStream ecOut = (ECKeyOutputStream) out.getOutputStream(); + out.write(inputData); + // Kill a node from first pipeline + nodeToKill = ecOut.getStreamEntries() + .get(0).getPipeline().getFirstNode(); + cluster.shutdownHddsDatanode(nodeToKill); + + out.write(inputData); + + // Wait for flushing thread to finish its work. + final long checkpoint = System.currentTimeMillis(); + ecOut.insertFlushCheckpoint(checkpoint); + GenericTestUtils.waitFor(() -> ecOut.getFlushCheckpoint() == checkpoint, + 100, 10000); + + // Check the second blockGroup pipeline to make sure that the failed + // node is not selected. 
+ assertThat(ecOut.getStreamEntries().get(1).getPipeline().getNodes()) + .doesNotContain(nodeToKill); + } + + try (OzoneInputStream is = bucket.readKey(keyName)) { + // We wrote "inputData" twice, so do two reads and ensure the correct + // data comes back. + for (int i = 0; i < 2; i++) { + byte[] fileContent = new byte[inputData.length]; + assertEquals(inputData.length, is.read(fileContent)); + assertEquals(new String(inputData, UTF_8), + new String(fileContent, UTF_8)); + } + } + } finally { + cluster.restartHddsDatanode(nodeToKill, true); + } + } + + private byte[] getInputBytes(int numChunks) { + return getInputBytes(0, numChunks, numChunks); + } + + private byte[] getInputBytes(int offset, int bufferChunks, int numChunks) { + byte[] inputData = new byte[offset + bufferChunks * chunkSize]; + for (int i = 0; i < numChunks; i++) { + int start = offset + (i * chunkSize); + Arrays.fill(inputData, start, start + chunkSize - 1, + String.valueOf(i % 9).getBytes(UTF_8)[0]); + } + return inputData; + } + + @Test + public void testBlockedHflushAndHsync() throws Exception { + // Expect ECKeyOutputStream hflush and hsync calls to throw exception + try (OzoneOutputStream oOut = TestHelper.createKey( + keyString, new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize), + inputSize, objectStore, volumeName, bucketName)) { + assertInstanceOf(ECKeyOutputStream.class, oOut.getOutputStream()); + KeyOutputStream kOut = (KeyOutputStream) oOut.getOutputStream(); + + assertThrows(NotImplementedException.class, () -> kOut.hflush()); + assertThrows(NotImplementedException.class, () -> kOut.hsync()); + } + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index b3d38fe8bc2..5252f49daa3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -112,6 +112,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; @@ -124,7 +125,7 @@ class TestOzoneAtRestEncryption { private static OzoneManager ozoneManager; private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - + @TempDir private static File testDir; private static OzoneConfiguration conf; private static final String TEST_KEY = "key1"; @@ -140,9 +141,6 @@ class TestOzoneAtRestEncryption { @BeforeAll static void init() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestSecureOzoneRpcClient.class.getSimpleName()); - File kmsDir = new File(testDir, UUID.randomUUID().toString()); assertTrue(kmsDir.mkdirs()); MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java index fd32698eec2..62429368690 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.TestInstance; @@ -151,5 +152,11 @@ private void assertListStatus(OzoneBucket bucket, String keyName, List versions = files.get(0).getKeyInfo().getKeyLocationVersions(); assertEquals(expectedVersionCount, versions.size()); + + List lightFiles = bucket.listStatusLight(keyName, false, "", 1); + + assertNotNull(lightFiles); + assertEquals(1, lightFiles.size()); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index 958a37380cf..fc29e031548 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -68,6 +68,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -99,15 +100,15 @@ */ class TestSecureOzoneRpcClient extends OzoneRpcClientTests { + @TempDir + private static File testDir; private static String keyProviderUri = "kms://http@kms:9600/kms"; @BeforeAll public static void init() throws Exception { - File testDir = GenericTestUtils.getTestDir( - TestSecureOzoneRpcClient.class.getSimpleName()); OzoneManager.setTestSecureOmFlag(true); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true); conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); @@ -121,6 +122,8 @@ public static void init() throws Exception { // constructed. 
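+ // A minimal sketch of the gating assumed by the flags set below: hsync-dependent
+ // tests need the cluster-wide allowance (ozone.hbase.enhancements.allowed) and the
+ // client-side opt-in ("ozone.client.hbase.enhancements.allowed") in addition to
+ // ozone.fs.hsync.enabled; the exact enforcement lives in the client and OM code,
+ // not in this test. Once all three are set to true, the combination could be
+ // sanity-checked like this:
+ boolean hsyncUsable =
+     conf.getBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, false)
+         && conf.getBoolean("ozone.client.hbase.enhancements.allowed", false)
+         && conf.getBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, false);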
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, OMConfigKeys.OZONE_BUCKET_LAYOUT_OBJECT_STORE); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, keyProviderUri); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java index 50bbd81ea6b..b602ce59a5b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java @@ -179,7 +179,6 @@ private static OzoneConfiguration createConfiguration(boolean enableLegacy) { ReplicationManagerConfiguration repConf = conf.getObject(ReplicationManagerConfiguration.class); - repConf.setEnableLegacy(enableLegacy); repConf.setInterval(Duration.ofSeconds(1)); repConf.setUnderReplicatedInterval(Duration.ofSeconds(1)); conf.setFromObject(repConf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 5ff8d713649..cf7d26847bb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -17,11 +17,18 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -31,20 +38,22 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; import org.apache.hadoop.hdds.scm.block.ScmBlockDeletingServiceMetrics; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import 
org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; -import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -80,17 +89,6 @@ import org.slf4j.LoggerFactory; import org.slf4j.event.Level; -import java.io.IOException; -import java.time.Duration; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.List; -import java.util.HashSet; -import java.util.ArrayList; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - import static java.lang.Math.max; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; @@ -133,7 +131,6 @@ public void init() throws Exception { conf = new OzoneConfiguration(); GenericTestUtils.setLogLevel(DeletedBlockLogImpl.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(LegacyReplicationManager.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(ReplicationManager.LOG, Level.DEBUG); conf.set("ozone.replication.allowed-configs", @@ -318,6 +315,9 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { assertEquals(metrics.getNumBlockDeletionTransactionCreated(), metrics.getNumBlockDeletionTransactionCompleted()); + assertEquals(metrics.getNumBlockDeletionCommandSent(), metrics.getNumCommandsDatanodeSent()); + assertEquals(metrics.getNumBlockDeletionCommandSuccess(), metrics.getNumCommandsDatanodeSuccess()); + assertEquals(metrics.getBNumBlockDeletionCommandFailure(), metrics.getNumCommandsDatanodeFailed()); assertThat(metrics.getNumBlockDeletionCommandSent()) .isGreaterThanOrEqualTo(metrics.getNumBlockDeletionCommandSuccess() + metrics.getBNumBlockDeletionCommandFailure()); @@ -341,7 +341,6 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { @Test public void testContainerStatisticsAfterDelete() throws Exception { ReplicationManager replicationManager = scm.getReplicationManager(); - boolean legacyEnabled = replicationManager.getConfig().isLegacyEnabled(); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -404,16 +403,13 @@ public void testContainerStatisticsAfterDelete() throws Exception { containerInfos.stream().forEach(container -> assertEquals(HddsProtos.LifeCycleState.DELETING, container.getState())); - LogCapturer logCapturer = LogCapturer.captureLogs( - legacyEnabled ? LegacyReplicationManager.LOG : ReplicationManager.LOG); + LogCapturer logCapturer = LogCapturer.captureLogs(ReplicationManager.LOG); logCapturer.clearOutput(); Thread.sleep(5000); replicationManager.processAll(); ((EventQueue) scm.getEventQueue()).processAll(1000); - String expectedOutput = legacyEnabled - ? 
"Resend delete Container" - : "Sending delete command for container"; + String expectedOutput = "Sending delete command for container"; GenericTestUtils.waitFor(() -> logCapturer.getOutput() .contains(expectedOutput), 500, 5000); @@ -610,7 +606,7 @@ public void testContainerDeleteWithInvalidKeyCount() final int valueSize = value.getBytes(UTF_8).length; final int keyCount = 1; List containerIdList = new ArrayList<>(); - containerInfos.stream().forEach(container -> { + containerInfos.forEach(container -> { assertEquals(valueSize, container.getUsedBytes()); assertEquals(keyCount, container.getNumberOfKeys()); containerIdList.add(container.getContainerID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index e68831b494f..40df94858e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -22,8 +22,8 @@ import static org.apache.ozone.test.MetricsAsserts.getDoubleGauge; import static org.apache.ozone.test.MetricsAsserts.getMetrics; -import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.List; import java.util.ArrayList; @@ -49,7 +49,6 @@ import org.apache.hadoop.ozone.container.common.transport.server .XceiverServerSpi; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; @@ -66,14 +65,14 @@ import org.apache.ratis.util.function.CheckedBiFunction; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; /** * This class tests the metrics of ContainerStateMachine. 
*/ public class TestCSMMetrics { - private static final String TEST_DIR = - GenericTestUtils.getTestDir("dfs").getAbsolutePath() - + File.separator; + @TempDir + private static Path testDir; @BeforeAll public static void setup() { @@ -154,6 +153,14 @@ static void runContainerStateMachineMetrics( assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric); assertCounter("WriteChunkMsNumOps", 1L, metric); + applyTransactionLatency = getDoubleGauge( + "ApplyTransactionNsAvgTime", metric); + assertThat(applyTransactionLatency).isGreaterThan(0.0); + writeStateMachineLatency = getDoubleGauge( + "WriteStateMachineDataNsAvgTime", metric); + assertThat(writeStateMachineLatency).isGreaterThan(0.0); + + //Read Chunk ContainerProtos.ContainerCommandRequestProto readChunkRequest = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest @@ -166,12 +173,6 @@ static void runContainerStateMachineMetrics( RaftGroupId.valueOf(pipeline.getId().getId())); assertCounter("NumQueryStateMachineOps", 1L, metric); assertCounter("NumApplyTransactionOps", 1L, metric); - applyTransactionLatency = getDoubleGauge( - "ApplyTransactionNsAvgTime", metric); - assertThat(applyTransactionLatency).isGreaterThan(0.0); - writeStateMachineLatency = getDoubleGauge( - "WriteStateMachineDataNsAvgTime", metric); - assertThat(writeStateMachineLatency).isGreaterThan(0.0); } finally { if (client != null) { @@ -184,8 +185,8 @@ static void runContainerStateMachineMetrics( static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); + dn.getRatisPort().getValue()); + final String dir = testDir.resolve(dn.getUuidString()).toString(); conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index f55912b26b0..e0c0fde6fe2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -24,7 +24,6 @@ import java.util.UUID; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -57,7 +56,6 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.ozone.test.GenericTestUtils; import com.google.common.collect.Maps; import static org.apache.ozone.test.MetricsAsserts.assertCounter; @@ -70,7 +68,6 @@ import org.apache.ratis.util.function.CheckedBiFunction; import org.apache.ratis.util.function.CheckedConsumer; import org.apache.ratis.util.function.CheckedFunction; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -82,7 +79,8 @@ */ @Timeout(300) public class 
TestContainerMetrics { - static final String TEST_DIR = GenericTestUtils.getRandomizedTempPath() + File.separator; + @TempDir + private static Path testDir; @TempDir private Path tempDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -94,17 +92,8 @@ public static void setup() { CONF.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, DFS_METRICS_PERCENTILES_INTERVALS); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); - CONF.set(OzoneConfigKeys.OZONE_METADATA_DIRS, TEST_DIR); - - } + CONF.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.toString()); - @AfterAll - public static void cleanup() { - // clean up volume dir - File file = new File(TEST_DIR); - if (file.exists()) { - FileUtil.fullyDelete(file); - } } @AfterEach @@ -119,7 +108,7 @@ public void testContainerMetrics() throws Exception { runTestClientServer(pipeline -> CONF .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + .getStandalonePort().getValue()), pipeline -> new XceiverClientGrpc(pipeline, CONF), (dn, volumeSet) -> new XceiverServerGrpc(dn, CONF, createDispatcher(dn, volumeSet), null), (dn, p) -> { @@ -172,7 +161,7 @@ static void runTestClientServer( initConf.accept(pipeline); DatanodeDetails dn = pipeline.getFirstNode(); - volumeSet = createVolumeSet(dn, TEST_DIR + dn.getUuidString()); + volumeSet = createVolumeSet(dn, testDir.resolve(dn.getUuidString()).toString()); server = createServer.apply(dn, volumeSet); server.start(); initServer.accept(dn, pipeline); @@ -235,8 +224,8 @@ static void runTestClientServer( private XceiverServerSpi newXceiverServerRatis(DatanodeDetails dn, MutableVolumeSet volumeSet) throws IOException { CONF.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); + dn.getRatisPort().getValue()); + final String dir = testDir.resolve(dn.getUuidString()).toString(); CONF.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = createDispatcher(dn, volumeSet); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1c5da04c0a3..553ea03f1fa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.ozoneimpl; +import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -31,11 +32,13 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.io.IOException; import java.nio.file.Path; import java.util.HashMap; import java.util.LinkedList; @@ -73,8 +76,7 @@ public void testCreateOzoneContainer( 
conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); + pipeline.getFirstNode().getStandalonePort().getValue()); DatanodeDetails datanodeDetails = randomDatanodeDetails(); container = ContainerTestUtils @@ -106,8 +108,7 @@ void testOzoneContainerStart( conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); + pipeline.getFirstNode().getStandalonePort().getValue()); DatanodeDetails datanodeDetails = randomDatanodeDetails(); container = ContainerTestUtils @@ -160,6 +161,159 @@ public void testOzoneContainerViaDataNode() throws Exception { } } + @Test + public void testOzoneContainerWithMissingContainer() throws Exception { + MiniOzoneCluster cluster = null; + try { + long containerID = + ContainerTestHelper.getTestContainerID(); + OzoneConfiguration conf = newOzoneConfiguration(); + + // Start ozone container Via Datanode create. + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(1) + .build(); + cluster.waitForClusterToBeReady(); + + runTestOzoneContainerWithMissingContainer(cluster, containerID); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + private void runTestOzoneContainerWithMissingContainer( + MiniOzoneCluster cluster, long testContainerID) throws Exception { + ContainerProtos.ContainerCommandRequestProto + request, writeChunkRequest, putBlockRequest, + updateRequest1, updateRequest2; + ContainerProtos.ContainerCommandResponseProto response, + updateResponse1, updateResponse2; + XceiverClientGrpc client = null; + try { + // This client talks to ozone container via datanode. + client = createClientForTesting(cluster); + client.connect(); + Pipeline pipeline = client.getPipeline(); + createContainerForTesting(client, testContainerID); + writeChunkRequest = writeChunkForContainer(client, testContainerID, + 1024); + + DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0).getDatanodeDetails(); + File containerPath = + new File(cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet().getContainer(testContainerID) + .getContainerData().getContainerPath()); + cluster.getHddsDatanode(datanodeDetails).stop(); + FileUtils.deleteDirectory(containerPath); + + // Restart & Check if the container has been marked as missing, since the container directory has been deleted. 
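+ // A hedged reading of what the assertions below exercise, inferred from the
+ // asserted results rather than from documented protocol semantics: read-path
+ // requests on the wiped container return CONTAINER_NOT_FOUND, while write-path
+ // requests return CONTAINER_MISSING once the restart has registered the
+ // container in the missing-container set. A small helper sketch that could
+ // de-duplicate those repeated checks:
+ java.util.function.BiConsumer<ContainerProtos.Result, ContainerProtos.ContainerCommandResponseProto>
+     assertResult = (expected, resp) -> {
+       assertNotNull(resp);
+       assertEquals(expected, resp.getResult());
+     };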
+ cluster.restartHddsDatanode(datanodeDetails, false); + GenericTestUtils.waitFor(() -> { + try { + return cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet() + .getMissingContainerSet().contains(testContainerID); + } catch (IOException e) { + return false; + } + }, 1000, 30000); + + // Read Chunk + request = ContainerTestHelper.getReadChunkRequest( + pipeline, writeChunkRequest.getWriteChunk()); + + response = client.sendCommand(request); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); + + response = createContainerForTesting(client, testContainerID); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Put Block + putBlockRequest = ContainerTestHelper.getPutBlockRequest( + pipeline, writeChunkRequest.getWriteChunk()); + + response = client.sendCommand(putBlockRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Write chunk + response = client.sendCommand(writeChunkRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Get Block + request = ContainerTestHelper. + getBlockRequest(pipeline, putBlockRequest.getPutBlock()); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); + + // Create Container + request = ContainerTestHelper.getCreateContainerRequest(testContainerID, pipeline); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Delete Block and Delete Chunk are handled by BlockDeletingService + // ContainerCommandRequestProto DeleteBlock and DeleteChunk requests + // are deprecated + + // Update an existing container + Map containerUpdate = new HashMap(); + containerUpdate.put("container_updated_key", "container_updated_value"); + updateRequest1 = ContainerTestHelper.getUpdateContainerRequest( + testContainerID, containerUpdate); + updateResponse1 = client.sendCommand(updateRequest1); + assertNotNull(updateResponse1); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, updateResponse1.getResult()); + + // Update a non-existing container + long nonExistingContainerID = + ContainerTestHelper.getTestContainerID(); + updateRequest2 = ContainerTestHelper.getUpdateContainerRequest( + nonExistingContainerID, containerUpdate); + updateResponse2 = client.sendCommand(updateRequest2); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, + updateResponse2.getResult()); + + // Restarting again & checking if the container is still not present on disk and marked as missing; this is to
+ cluster.restartHddsDatanode(datanodeDetails, false); + GenericTestUtils.waitFor(() -> { + try { + return cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet() + .getMissingContainerSet().contains(testContainerID); + } catch (IOException e) { + return false; + } + }, 1000, 30000); + // Create Recovering Container + request = ContainerTestHelper.getCreateContainerRequest(testContainerID, pipeline, + ContainerProtos.ContainerDataProto.State.RECOVERING); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + //write chunk on recovering container + response = client.sendCommand(writeChunkRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + //write chunk on recovering container + response = client.sendCommand(putBlockRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + //Get block on the recovering container should succeed now. + request = ContainerTestHelper.getBlockRequest(pipeline, putBlockRequest.getPutBlock()); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + + } finally { + if (client != null) { + client.close(); + } + } + } + public static void runTestOzoneContainerViaDataNode( long testContainerID, XceiverClientSpi client) throws Exception { ContainerProtos.ContainerCommandRequestProto @@ -506,10 +660,14 @@ private static XceiverClientGrpc createClientForTesting( MiniOzoneCluster cluster) { Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipelines().iterator().next(); + return createClientForTesting(pipeline, cluster); + } + + private static XceiverClientGrpc createClientForTesting(Pipeline pipeline, MiniOzoneCluster cluster) { return new XceiverClientGrpc(pipeline, cluster.getConf()); } - public static void createContainerForTesting(XceiverClientSpi client, + public static ContainerProtos.ContainerCommandResponseProto createContainerForTesting(XceiverClientSpi client, long containerID) throws Exception { // Create container ContainerProtos.ContainerCommandRequestProto request = @@ -518,6 +676,7 @@ public static void createContainerForTesting(XceiverClientSpi client, ContainerProtos.ContainerCommandResponseProto response = client.sendCommand(request); assertNotNull(response); + return response; } public static ContainerProtos.ContainerCommandRequestProto diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index 92d716f7a40..262d3026e78 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -42,7 +42,6 @@ import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -82,6 +81,8 @@ class TestSecureOzoneContainer { @TempDir private Path tempFolder; + @TempDir + private Path ozoneMetaPath; private 
OzoneConfiguration conf; private CertificateClientTestImpl caClient; @@ -107,9 +108,7 @@ static void init() { @BeforeEach void setup() throws Exception { conf = new OzoneConfiguration(); - String ozoneMetaPath = - GenericTestUtils.getTempPath("ozoneMeta"); - conf.set(OZONE_METADATA_DIRS, ozoneMetaPath); + conf.set(OZONE_METADATA_DIRS, ozoneMetaPath.toString()); caClient = new CertificateClientTestImpl(conf); secretKeyClient = new SecretKeyTestClient(); secretManager = new ContainerTokenSecretManager( @@ -132,8 +131,7 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.toString()); conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline - .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue()); + .getFirstNode().getStandalonePort().getValue()); conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java index 08932aa4e37..4e7aeaeef79 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java @@ -137,7 +137,7 @@ void targetPullsFromWrongService() throws Exception { long containerID = createNewClosedContainer(source); DatanodeDetails invalidPort = new DatanodeDetails(source); invalidPort.setPort(Port.Name.REPLICATION, - source.getPort(Port.Name.STANDALONE).getValue()); + source.getStandalonePort().getValue()); ReplicateContainerCommand cmd = ReplicateContainerCommand.fromSources(containerID, ImmutableList.of(invalidPort)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 27e85501662..32e7a0e9e66 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.server; -import java.io.File; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; @@ -62,7 +61,6 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.ozone.test.GenericTestUtils; import com.google.common.collect.Maps; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.util.function.CheckedBiConsumer; @@ -82,8 +80,8 @@ * Test Containers. 
*/ public class TestContainerServer { - static final String TEST_DIR = GenericTestUtils.getTestDir("dfs") - .getAbsolutePath() + File.separator; + @TempDir + private static Path testDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClient caClient; @TempDir @@ -92,7 +90,7 @@ public class TestContainerServer { @BeforeAll public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); - CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); + CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, testDir.toString()); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); caClient = new DNCertificateClient(new SecurityConfig(CONF), null, @@ -110,7 +108,7 @@ public void testClientServer() throws Exception { runTestClientServer(1, (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + .getStandalonePort().getValue()), XceiverClientGrpc::new, (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf, new TestContainerDispatcher(), caClient), (dn, p) -> { @@ -126,8 +124,8 @@ public void testClientServerRatisGrpc() throws Exception { static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); + dn.getRatisPort().getValue()); + final String dir = testDir.resolve(dn.getUuid().toString()).toString(); conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); @@ -191,9 +189,9 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, - Paths.get(TEST_DIR, "dfs", "data", "hdds", + Paths.get(testDir.toString(), "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); - conf.set(OZONE_METADATA_DIRS, TEST_DIR); + conf.set(OZONE_METADATA_DIRS, testDir.toString()); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) @@ -211,8 +209,7 @@ public void testClientServerWithContainerDispatcher() throws Exception { UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + pipeline.getFirstNode().getStandalonePort().getValue()), XceiverClientGrpc::new, (dn, conf) -> new XceiverServerGrpc(dd, conf, hddsDispatcher, caClient), (dn, p) -> { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index cae7f6bb59e..8be3549f67e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -70,7 +70,6 @@ import 
org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; @@ -98,7 +97,6 @@ import org.apache.ratis.util.function.CheckedBiConsumer; import org.apache.ratis.util.function.CheckedBiFunction; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -117,8 +115,8 @@ public class TestSecureContainerServer { @TempDir private Path tempDir; - private static final String TEST_DIR - = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; + @TempDir + private static Path testDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClientTestImpl caClient; private static SecretKeyClient secretKeyClient; @@ -129,7 +127,7 @@ public class TestSecureContainerServer { public static void setup() throws Exception { DefaultMetricsSystem.setMiniClusterMode(true); ExitUtils.disableSystemExit(); - CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); + CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, testDir.toString()); CONF.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); caClient = new CertificateClientTestImpl(CONF); @@ -144,11 +142,6 @@ public static void setup() throws Exception { tokenLifetime, secretKeyClient); } - @AfterAll - public static void deleteTestDir() { - FileUtils.deleteQuietly(new File(TEST_DIR)); - } - @AfterEach public void cleanUp() throws IOException { FileUtils.deleteQuietly(new File(CONF.get(HDDS_DATANODE_DIR_KEY))); @@ -162,7 +155,7 @@ public void testClientServer() throws Exception { runTestClientServer(1, (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + .getStandalonePort().getValue()), XceiverClientGrpc::new, (dn, conf) -> new XceiverServerGrpc(dd, conf, hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); @@ -172,9 +165,9 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, - Paths.get(TEST_DIR, "dfs", "data", "hdds", + Paths.get(testDir.toString(), "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); - conf.set(OZONE_METADATA_DIRS, TEST_DIR); + conf.set(OZONE_METADATA_DIRS, testDir.toString()); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) @@ -195,12 +188,12 @@ public void testClientServerRatisGrpc() throws Exception { XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); + dn.getRatisPort().getValue()); conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); - final String dir = TEST_DIR + dn.getUuid(); + final String dir = 
testDir.resolve(dn.getUuidString()).toString(); conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index 7af0b5f9aa1..aac55367adc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.debug; import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; import org.apache.commons.lang3.tuple.Pair; @@ -35,6 +36,8 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; +import org.apache.hadoop.ozone.debug.ldb.DBScanner; +import org.apache.hadoop.ozone.debug.ldb.RDBParser; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import jakarta.annotation.Nonnull; @@ -68,6 +71,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * This class tests `ozone debug ldb` CLI that reads from a RocksDB directory. @@ -98,8 +102,6 @@ public void setup() throws IOException { pstderr = new PrintWriter(stderr); cmd = new CommandLine(new RDBParser()) - .addSubcommand(new DBScanner()) - .addSubcommand(new ValueSchema()) .setOut(pstdout) .setErr(pstderr); @@ -120,6 +122,7 @@ public void shutdown() throws IOException { /** * Defines ldb tool test cases. 
*/ + @SuppressWarnings({"methodlength"}) private static Stream scanTestCases() { return Stream.of( Arguments.of( @@ -170,6 +173,55 @@ private static Stream scanTestCases() { Named.of("Invalid EndKey key9", Arrays.asList("--endkey", "key9")), Named.of("Expect key1-key5", Pair.of("key1", "key6")) ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter key3", Arrays.asList("--filter", "keyName:equals:key3")), + Named.of("Expect key3", Pair.of("key3", "key4")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter invalid key", Arrays.asList("--filter", "keyName:equals:key9")), + Named.of("Expect key1-key3", null) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize<2000", Arrays.asList("--filter", "dataSize:lesser:2000")), + Named.of("Expect key1-key5", Pair.of("key1", "key6")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize<500", Arrays.asList("--filter", "dataSize:lesser:500")), + Named.of("Expect empty result", null) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize>500", Arrays.asList("--filter", "dataSize:greater:500")), + Named.of("Expect key1-key5", Pair.of("key1", "key6")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize>2000", Arrays.asList("--filter", "dataSize:greater:2000")), + Named.of("Expect empty result", null) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter key3 regex", Arrays.asList("--filter", "keyName:regex:^.*3$")), + Named.of("Expect key3", Pair.of("key3", "key4")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter keys whose dataSize digits start with 5 using regex", + Arrays.asList("--filter", "dataSize:regex:^5.*$")), + Named.of("Expect empty result", null) + ), Arguments.of( Named.of(BLOCK_DATA + " V3", Pair.of(BLOCK_DATA, true)), Named.of("Default", Pair.of(0, "")), @@ -291,6 +343,50 @@ void testScanOfPipelinesWhenNoData() throws IOException { assertEquals("", stderr.toString()); } + @Test + void testScanWithRecordsPerFile() throws IOException { + // Prepare dummy table + int recordsCount = 5; + prepareKeyTable(recordsCount); + + String scanDir1 = tempDir.getAbsolutePath() + "/scandir1"; + // Prepare scan args + int maxRecordsPerFile = 2; + List completeScanArgs1 = new ArrayList<>(Arrays.asList( + "--db", dbStore.getDbLocation().getAbsolutePath(), + "scan", + "--column-family", KEY_TABLE, "--out", scanDir1 + File.separator + "keytable", + "--max-records-per-file", String.valueOf(maxRecordsPerFile))); + File tmpDir1 = new File(scanDir1); + tmpDir1.deleteOnExit(); + + int exitCode1 = cmd.execute(completeScanArgs1.toArray(new String[0])); + assertEquals(0, exitCode1); + assertTrue(tmpDir1.isDirectory()); + File[] subFiles = tmpDir1.listFiles(); + assertNotNull(subFiles); + assertEquals(Math.ceil(recordsCount / (maxRecordsPerFile * 1.0)), subFiles.length); + for (File subFile : subFiles) { + JsonNode jsonNode = MAPPER.readTree(subFile); + assertNotNull(jsonNode); + } + + String scanDir2 = tempDir.getAbsolutePath() + 
"/scandir2"; + // Used with parameter '-l' + List completeScanArgs2 = new ArrayList<>(Arrays.asList( + "--db", dbStore.getDbLocation().getAbsolutePath(), + "scan", + "--column-family", KEY_TABLE, "--out", scanDir2 + File.separator + "keytable", + "--max-records-per-file", String.valueOf(maxRecordsPerFile), "-l", "2")); + File tmpDir2 = new File(scanDir2); + tmpDir2.deleteOnExit(); + + int exitCode2 = cmd.execute(completeScanArgs2.toArray(new String[0])); + assertEquals(0, exitCode2); + assertTrue(tmpDir2.isDirectory()); + assertEquals(1, tmpDir2.listFiles().length); + } + @Test void testSchemaCommand() throws IOException { // Prepare dummy table @@ -339,22 +435,7 @@ private void prepareTable(String tableName, boolean schemaV3) switch (tableName) { case KEY_TABLE: - // Dummy om.db with only keyTable - dbStore = DBStoreBuilder.newBuilder(conf).setName("om.db") - .setPath(tempDir.toPath()).addTable(KEY_TABLE).build(); - - Table keyTable = dbStore.getTable(KEY_TABLE); - // Insert 5 keys - for (int i = 1; i <= 5; i++) { - String key = "key" + i; - OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", - key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, HddsProtos.ReplicationFactor.ONE)).build(); - keyTable.put(key.getBytes(UTF_8), - value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); - - // Populate map - dbMap.put(key, toMap(value)); - } + prepareKeyTable(5); break; case BLOCK_DATA: @@ -402,6 +483,29 @@ private void prepareTable(String tableName, boolean schemaV3) } } + /** + * Prepare the keytable for testing. + * @param recordsCount prepare the number of keys + */ + private void prepareKeyTable(int recordsCount) throws IOException { + if (recordsCount < 1) { + throw new IllegalArgumentException("recordsCount must be greater than 1."); + } + // Dummy om.db with only keyTable + dbStore = DBStoreBuilder.newBuilder(conf).setName("om.db") + .setPath(tempDir.toPath()).addTable(KEY_TABLE).build(); + Table keyTable = dbStore.getTable(KEY_TABLE); + for (int i = 1; i <= recordsCount; i++) { + String key = "key" + i; + OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", + key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, + HddsProtos.ReplicationFactor.ONE)).build(); + keyTable.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); + // Populate map + dbMap.put(key, toMap(value)); + } + } + private static Map toMap(Object obj) throws IOException { ObjectWriter objectWriter = DBScanner.JsonSerializationHelper.getWriter(); String json = objectWriter.writeValueAsString(obj); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java index c24cf748ddb..29f91821ebd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java @@ -70,6 +70,8 @@ public class TestLeaseRecoverer { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index 7c82633f113..c5a45da8c77 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.segmentparser.DatanodeRatisLogParser; +import org.apache.hadoop.ozone.debug.segmentparser.DatanodeRatisLogParser; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java index 9137eca6c0e..d5551ce6737 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java @@ -36,6 +36,7 @@ import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -45,6 +46,9 @@ import java.io.IOException; import java.net.URI; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -53,8 +57,8 @@ * Test for HadoopDirTreeGenerator. */ public class TestHadoopDirTreeGenerator { - - private String path; + @TempDir + private java.nio.file.Path path; private OzoneConfiguration conf = null; private MiniOzoneCluster cluster = null; private ObjectStore store = null; @@ -64,12 +68,8 @@ public class TestHadoopDirTreeGenerator { @BeforeEach public void setup() { - path = GenericTestUtils - .getTempPath(TestHadoopDirTreeGenerator.class.getSimpleName()); GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG); - File baseDir = new File(path); - baseDir.mkdirs(); } /** @@ -79,7 +79,6 @@ private void shutdown() throws IOException { IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); - FileUtils.deleteDirectory(new File(path)); } } @@ -108,8 +107,8 @@ protected OzoneConfiguration getOzoneConfiguration() { public void testNestedDirTreeGeneration() throws Exception { try { startCluster(); - FileOutputStream out = FileUtils.openOutputStream(new File(path, - "conf")); + FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(), + "conf")); cluster.getConf().writeXml(out); out.getFD().sync(); out.close(); @@ -140,7 +139,7 @@ private void verifyDirTree(String volumeName, String bucketName, int depth, OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); String rootPath = "o3fs://" + bucketName + "." 
+ volumeName; - String confPath = new File(path, "conf").getAbsolutePath(); + String confPath = new File(path.toString(), "conf").getAbsolutePath(); new Freon().execute( new String[]{"-conf", confPath, "dtsg", "-d", depth + "", "-c", fileCount + "", "-s", span + "", "-n", "1", "-r", rootPath, @@ -154,7 +153,7 @@ private void verifyDirTree(String volumeName, String bucketName, int depth, FileStatus[] fileStatuses = fileSystem.listStatus(rootDir); // verify the num of peer directories, expected span count is 1 // as it has only one dir at root. - verifyActualSpan(1, fileStatuses); + verifyActualSpan(1, Arrays.asList(fileStatuses)); for (FileStatus fileStatus : fileStatuses) { int actualDepth = traverseToLeaf(fileSystem, fileStatus.getPath(), 1, depth, span, @@ -168,14 +167,16 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth, int expectedFileCnt, StorageSize perFileSize) throws IOException { FileStatus[] fileStatuses = fs.listStatus(dirPath); + List fileStatusList = new ArrayList<>(); + Collections.addAll(fileStatusList, fileStatuses); // check the num of peer directories except root and leaf as both // has less dirs. if (depth < expectedDepth - 1) { - verifyActualSpan(expectedSpanCnt, fileStatuses); + verifyActualSpan(expectedSpanCnt, fileStatusList); } int actualNumFiles = 0; ArrayList files = new ArrayList<>(); - for (FileStatus fileStatus : fileStatuses) { + for (FileStatus fileStatus : fileStatusList) { if (fileStatus.isDirectory()) { ++depth; return traverseToLeaf(fs, fileStatus.getPath(), depth, expectedDepth, @@ -196,7 +197,7 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth, } private int verifyActualSpan(int expectedSpanCnt, - FileStatus[] fileStatuses) { + List fileStatuses) { int actualSpan = 0; for (FileStatus fileStatus : fileStatuses) { if (fileStatus.isDirectory()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java index 4411c0d2ea5..f4993d538ee 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java @@ -34,6 +34,7 @@ import java.util.LinkedList; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -42,7 +43,6 @@ import java.io.IOException; import java.net.URI; -import static org.apache.ozone.test.GenericTestUtils.getTempPath; import static org.junit.jupiter.api.Assertions.assertEquals; /** @@ -50,8 +50,8 @@ */ public class TestHadoopNestedDirGenerator { - - private String path; + @TempDir + private java.nio.file.Path path; private OzoneConfiguration conf = null; private MiniOzoneCluster cluster = null; private ObjectStore store = null; @@ -61,11 +61,8 @@ public class TestHadoopNestedDirGenerator { @BeforeEach public void setup() { - path = getTempPath(TestHadoopNestedDirGenerator.class.getSimpleName()); GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG); - File baseDir = new File(path); - baseDir.mkdirs(); } /** @@ -76,7 +73,6 @@ private void shutdown() throws IOException { IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); - 
FileUtils.deleteDirectory(new File(path)); } } @@ -101,8 +97,7 @@ private void startCluster() throws Exception { public void testNestedDirTreeGeneration() throws Exception { try { startCluster(); - FileOutputStream out = FileUtils.openOutputStream(new File(path, - "conf")); + FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(), "conf")); cluster.getConf().writeXml(out); out.getFD().sync(); out.close(); @@ -128,7 +123,7 @@ private void verifyDirTree(String volumeName, String bucketName, OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); String rootPath = "o3fs://" + bucketName + "." + volumeName; - String confPath = new File(path, "conf").getAbsolutePath(); + String confPath = new File(path.toString(), "conf").getAbsolutePath(); new Freon().execute(new String[]{"-conf", confPath, "ddsg", "-d", actualDepth + "", "-s", span + "", "-n", "1", "-r", rootPath}); // verify the directory structure diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHsyncGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHsyncGenerator.java index 7026f32d8b3..66714e58bad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHsyncGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHsyncGenerator.java @@ -94,14 +94,13 @@ public void test() throws IOException { OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); - String rootPath = String.format("%s://%s/%s/%s/", - OZONE_OFS_URI_SCHEME, cluster.getConf().get(OZONE_OM_ADDRESS_KEY), - volumeName, bucketName); + String rootPath = String.format("%s://%s/%s/%s/", OZONE_OFS_URI_SCHEME, + cluster.getConf().get(OZONE_OM_ADDRESS_KEY), volumeName, bucketName); int exitCode = cmd.execute( "--path", rootPath, - "--bytes-per-write", "1024", - "--number-of-files", "2", + "--bytes-per-write", "8", + "--writes-per-transaction", "64", "-t", "5", "-n", "100"); assertEquals(0, exitCode); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index c566cae414f..ecb493ecf8f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -56,6 +56,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.time.Duration; +import java.util.Collections; import java.util.List; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; @@ -231,7 +232,7 @@ public void testDAGReconstruction() final File checkpointSnap2 = new File(snap2.getDbPath()); GenericTestUtils.waitFor(checkpointSnap2::exists, 2000, 20000); - List sstDiffList21 = differ.getSSTDiffList(snap2, snap1); + List sstDiffList21 = differ.getSSTDiffList(snap2, snap1).orElse(Collections.emptyList()); LOG.debug("Got diff list: {}", sstDiffList21); // Delete 1000 keys, take a 3rd snapshot, and do another diff @@ -250,13 +251,13 @@ public void testDAGReconstruction() final File checkpointSnap3 = new File(snap3.getDbPath()); GenericTestUtils.waitFor(checkpointSnap3::exists, 2000, 20000); - List sstDiffList32 = differ.getSSTDiffList(snap3, snap2); + List sstDiffList32 = differ.getSSTDiffList(snap3, snap2).orElse(Collections.emptyList()); // snap3-snap1 diff 
result is a combination of snap3-snap2 and snap2-snap1 - List sstDiffList31 = differ.getSSTDiffList(snap3, snap1); + List sstDiffList31 = differ.getSSTDiffList(snap3, snap1).orElse(Collections.emptyList()); // Same snapshot. Result should be empty list - List sstDiffList22 = differ.getSSTDiffList(snap2, snap2); + List sstDiffList22 = differ.getSSTDiffList(snap2, snap2).orElse(Collections.emptyList()); assertThat(sstDiffList22).isEmpty(); snapDB1.close(); snapDB2.close(); @@ -282,13 +283,13 @@ public void testDAGReconstruction() volumeName, bucketName, "snap3", ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); - List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1); + List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1).orElse(Collections.emptyList()); assertEquals(sstDiffList21, sstDiffList21Run2); - List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2); + List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2).orElse(Collections.emptyList()); assertEquals(sstDiffList32, sstDiffList32Run2); - List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1); + List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1).orElse(Collections.emptyList()); assertEquals(sstDiffList31, sstDiffList31Run2); snapDB1.close(); snapDB2.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java index 5244bb85790..8eb83b91356 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java @@ -36,6 +36,7 @@ import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -52,8 +53,8 @@ * Test for OmBucketReadWriteFileOps. */ public class TestOmBucketReadWriteFileOps { - - private String path; + @TempDir + private java.nio.file.Path path; private OzoneConfiguration conf = null; private MiniOzoneCluster cluster = null; private ObjectStore store = null; @@ -63,12 +64,8 @@ public class TestOmBucketReadWriteFileOps { @BeforeEach public void setup() { - path = GenericTestUtils - .getTempPath(TestOmBucketReadWriteFileOps.class.getSimpleName()); GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG); - File baseDir = new File(path); - baseDir.mkdirs(); } /** @@ -78,7 +75,6 @@ private void shutdown() throws IOException { IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); - FileUtils.deleteDirectory(new File(path)); } } @@ -107,8 +103,7 @@ protected OzoneConfiguration getOzoneConfiguration() { public void testOmBucketReadWriteFileOps() throws Exception { try { startCluster(); - FileOutputStream out = FileUtils.openOutputStream(new File(path, - "conf")); + FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(), "conf")); cluster.getConf().writeXml(out); out.getFD().sync(); out.close(); @@ -154,7 +149,7 @@ private void verifyFreonCommand(ParameterBuilder parameterBuilder) volume.createBucket(parameterBuilder.bucketName); String rootPath = "o3fs://" + parameterBuilder.bucketName + "." 
+ parameterBuilder.volumeName + parameterBuilder.prefixFilePath; - String confPath = new File(path, "conf").getAbsolutePath(); + String confPath = new File(path.toString(), "conf").getAbsolutePath(); new Freon().execute( new String[]{"-conf", confPath, "obrwf", "-P", rootPath, "-r", String.valueOf(parameterBuilder.fileCountForRead), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java index 3c7a04071b3..5e24cfc4e0e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java @@ -34,6 +34,7 @@ import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; @@ -43,6 +44,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.file.Path; import java.util.Iterator; import static org.assertj.core.api.Assertions.assertThat; @@ -55,8 +57,8 @@ public class TestOmBucketReadWriteKeyOps { // TODO: Remove code duplication of TestOmBucketReadWriteKeyOps with // TestOmBucketReadWriteFileOps. - - private String path; + @TempDir + private Path path; private OzoneConfiguration conf = null; private MiniOzoneCluster cluster = null; private ObjectStore store = null; @@ -66,12 +68,8 @@ public class TestOmBucketReadWriteKeyOps { @BeforeEach public void setup() { - path = GenericTestUtils - .getTempPath(TestHadoopDirTreeGenerator.class.getSimpleName()); GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG); - File baseDir = new File(path); - baseDir.mkdirs(); } /** @@ -111,7 +109,7 @@ private OzoneConfiguration getOzoneConfiguration() { public void testOmBucketReadWriteKeyOps(boolean fsPathsEnabled) throws Exception { try { startCluster(fsPathsEnabled); - FileOutputStream out = FileUtils.openOutputStream(new File(path, + FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(), "conf")); cluster.getConf().writeXml(out); out.getFD().sync(); @@ -157,7 +155,7 @@ private void verifyFreonCommand(ParameterBuilder parameterBuilder) OzoneVolume volume = store.getVolume(parameterBuilder.volumeName); volume.createBucket(parameterBuilder.bucketName); OzoneBucket bucket = volume.getBucket(parameterBuilder.bucketName); - String confPath = new File(path, "conf").getAbsolutePath(); + String confPath = new File(path.toString(), "conf").getAbsolutePath(); long startTime = System.currentTimeMillis(); new Freon().execute( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java index 7811470887d..63d2870e7d7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java @@ -35,15 +35,14 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OzoneManager; import 
org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.ozone.test.GenericTestUtils; -import org.apache.ratis.util.FileUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; -import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -61,6 +60,8 @@ */ @Timeout(value = 300, unit = TimeUnit.SECONDS) public class TestContainerMapper { + @TempDir + private static Path dbPath; private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; private static ObjectStore store = null; @@ -71,14 +72,12 @@ public class TestContainerMapper { private static String bucketName = UUID.randomUUID().toString(); private static OzoneConfiguration conf; private static List keyList = new ArrayList<>(); - private static String dbPath; @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - dbPath = GenericTestUtils.getRandomizedTempPath(); - conf.set(OZONE_OM_DB_DIRS, dbPath); + conf.set(OZONE_OM_DB_DIRS, dbPath.toString()); conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "100MB"); conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB); @@ -137,6 +136,5 @@ private static byte[] generateData(int size, byte val) { public static void shutdown() throws IOException { IOUtils.closeQuietly(ozClient); cluster.shutdown(); - FileUtils.deleteFully(new File(dbPath)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java index 73596781cc6..e1b2a59d78c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -96,6 +97,9 @@ public void testCreateBucketWithOlderClient() throws Exception { OzoneManagerProtocolProtos.StorageTypeProto.DISK) .build()) .build()).build(); + createBucketReq = createBucketReq.toBuilder() + .setUserInfo(OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName(UserGroupInformation.getCurrentUser().getShortUserName()).build()).build(); OzoneManagerProtocolProtos.OMResponse omResponse = cluster.getOzoneManager().getOmServerProtocol() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index efa2963842d..e9c9b946c8e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; 
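A cleanup that runs through most files in this patch (TestContainerServer, TestSecureContainerServer, TestContainerMapper, the freon tests, and TestKeyManagerImpl below): temp directories obtained from GenericTestUtils and deleted by hand in @AfterAll or shutdown methods are replaced by JUnit 5 @TempDir injection, which creates the directory for the test lifecycle and removes it automatically. A minimal sketch of the pattern, with illustrative names:

import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;

import java.nio.file.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.io.TempDir;

// Illustrative test class, not part of the patch.
class ExampleTempDirTest {
  @TempDir
  private Path metaDir;                // injected and cleaned up by JUnit

  private OzoneConfiguration conf;

  @BeforeEach
  void setup() {
    conf = new OzoneConfiguration();
    conf.set(OZONE_METADATA_DIRS, metaDir.toString());  // no manual deleteDirectory needed
  }
}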
+import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -92,11 +93,9 @@ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.Time; import com.google.common.collect.Sets; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; @@ -122,6 +121,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -154,7 +154,8 @@ */ @Timeout(300) public class TestKeyManagerImpl { - + @TempDir + private static File dir; private static PrefixManager prefixManager; private static KeyManagerImpl keyManager; private static NodeManager nodeManager; @@ -163,7 +164,6 @@ public class TestKeyManagerImpl { private static StorageContainerLocationProtocol mockScmContainerClient; private static OzoneConfiguration conf; private static OMMetadataManager metadataManager; - private static File dir; private static long scmBlockSize; private static final String KEY_NAME = "key1"; private static final String BUCKET_NAME = "bucket1"; @@ -171,13 +171,13 @@ public class TestKeyManagerImpl { private static final String VERSIONED_BUCKET_NAME = "versionedbucket1"; private static final String VOLUME_NAME = "vol1"; private static OzoneManagerProtocol writeClient; + private static OzoneClient rpcClient; private static OzoneManager om; @BeforeAll public static void setUp() throws Exception { ExitUtils.disableSystemExit(); conf = new OzoneConfiguration(); - dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, @@ -219,6 +219,7 @@ public static void setUp() throws Exception { keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); prefixManager = omTestManagers.getPrefixManager(); writeClient = omTestManagers.getWriteClient(); + rpcClient = omTestManagers.getRpcClient(); mockContainerClient(); @@ -235,10 +236,11 @@ public static void setUp() throws Exception { @AfterAll public static void cleanup() throws Exception { + writeClient.close(); + rpcClient.close(); scm.stop(); scm.join(); om.stop(); - FileUtils.deleteDirectory(dir); } @BeforeEach @@ -252,10 +254,11 @@ public void init() throws Exception { public void cleanupTest() throws IOException { mockContainerClient(); org.apache.hadoop.fs.Path volumePath = new org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, VOLUME_NAME); - FileSystem fs = FileSystem.get(conf); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); + try (FileSystem fs = FileSystem.get(conf)) { + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); + fs.delete(new 
org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); + } } private static void mockContainerClient() { @@ -334,8 +337,7 @@ public void openKeyFailureInSafeMode() throws Exception { .setKeyName(KEY_NAME) .setDataSize(1000) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .build(); OMException omException = assertThrows(OMException.class, () -> writeClient.openKey(keyArgs)); @@ -1692,8 +1694,7 @@ private OmKeyArgs.Builder createBuilder(String bucketName) .setDataSize(0) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .setVolumeName(VOLUME_NAME) .setOwnerName(ugi.getShortUserName()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java index 204c0ee6681..7e9744d0123 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java @@ -48,6 +48,7 @@ import static com.google.common.collect.Lists.newLinkedList; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; import static org.junit.jupiter.params.provider.Arguments.of; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -80,6 +81,7 @@ public static void init() throws Exception { // Set the number of keys to be processed during batch operate. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3); conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3); + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java index 11594f3ef11..0829c8fc19a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java @@ -47,6 +47,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; /** @@ -81,6 +82,7 @@ public static void init() throws Exception { // Set the number of keys to be processed during batch operate. 
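TestListKeys above and TestListKeysWithFSO here additionally cap OZONE_OM_SERVER_LIST_MAX_SIZE at 2, so the OM returns at most two entries per listKeys call and the client has to page through results instead of receiving them in one response. A rough, hypothetical illustration of the paging behaviour this exercises (the helper and functional interface stand in for the real client/OM round trip; this is not the actual client code):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

// Hypothetical sketch: accumulate results when the server returns at most pageSize
// entries per call, resuming after the last key seen.
final class PagedListingSketch {
  static List<String> listAll(Function<String, List<String>> listPageAfter, int pageSize) {
    List<String> result = new ArrayList<>();
    String startAfter = "";
    while (true) {
      List<String> page = listPageAfter.apply(startAfter);
      result.addAll(page);
      if (page.size() < pageSize) {
        return result;                        // short page means no more entries
      }
      startAfter = page.get(page.size() - 1); // continue after the last returned key
    }
  }
}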
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3); conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3); + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index bd5046bfc0b..f7394729898 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -409,9 +409,9 @@ public void testInstallIncrementalSnapshot(@TempDir Path tempDir) // Do some transactions so that the log index increases List firstKeys = writeKeysToIncreaseLogIndex(leaderRatisServer, - 80); + 100); - SnapshotInfo snapshotInfo2 = createOzoneSnapshot(leaderOM, "snap80"); + SnapshotInfo snapshotInfo2 = createOzoneSnapshot(leaderOM, "snap100"); followerOM.getConfiguration().setInt( OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); @@ -424,9 +424,9 @@ public void testInstallIncrementalSnapshot(@TempDir Path tempDir) }, 1000, 30_000); // Get two incremental tarballs, adding new keys/snapshot for each. - IncrementData firstIncrement = getNextIncrementalTarball(160, 2, leaderOM, + IncrementData firstIncrement = getNextIncrementalTarball(200, 2, leaderOM, leaderRatisServer, faultInjector, followerOM, tempDir); - IncrementData secondIncrement = getNextIncrementalTarball(240, 3, leaderOM, + IncrementData secondIncrement = getNextIncrementalTarball(300, 3, leaderOM, leaderRatisServer, faultInjector, followerOM, tempDir); // Resume the follower thread, it would download the incremental snapshot. @@ -501,10 +501,10 @@ public void testInstallIncrementalSnapshot(@TempDir Path tempDir) assertNotNull(filesInCandidate); assertEquals(0, filesInCandidate.length); - checkSnapshot(leaderOM, followerOM, "snap80", firstKeys, snapshotInfo2); - checkSnapshot(leaderOM, followerOM, "snap160", firstIncrement.getKeys(), + checkSnapshot(leaderOM, followerOM, "snap100", firstKeys, snapshotInfo2); + checkSnapshot(leaderOM, followerOM, "snap200", firstIncrement.getKeys(), firstIncrement.getSnapshotInfo()); - checkSnapshot(leaderOM, followerOM, "snap240", secondIncrement.getKeys(), + checkSnapshot(leaderOM, followerOM, "snap300", secondIncrement.getKeys(), secondIncrement.getSnapshotInfo()); assertEquals( followerOM.getOmSnapshotProvider().getInitCount(), 2, @@ -618,7 +618,7 @@ public void testInstallIncrementalSnapshotWithFailure() throws Exception { // Do some transactions so that the log index increases List firstKeys = writeKeysToIncreaseLogIndex(leaderRatisServer, - 80); + 100); // Start the inactive OM. Checkpoint installation will happen spontaneously. 
cluster.startInactiveOM(followerNodeId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index f25bb47f0db..b48a7067cad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om; import com.google.common.collect.ImmutableMap; -import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ContainerBlockID; @@ -63,6 +62,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.rpc.RpcClient; @@ -71,7 +71,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.io.grpc.Status; import org.apache.ratis.thirdparty.io.grpc.StatusException; @@ -83,6 +82,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -136,12 +136,12 @@ */ @Timeout(300) public class TestOmContainerLocationCache { - + @TempDir + private static File dir; private static ScmBlockLocationProtocol mockScmBlockLocationProtocol; private static StorageContainerLocationProtocol mockScmContainerClient; private static OzoneConfiguration conf; private static OMMetadataManager metadataManager; - private static File dir; private static final String BUCKET_NAME = "bucket1"; private static final String VERSIONED_BUCKET_NAME = "versionedBucket1"; private static final String VOLUME_NAME = "vol1"; @@ -162,14 +162,13 @@ public class TestOmContainerLocationCache { private static final DatanodeDetails DN5 = MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID()); private static final AtomicLong CONTAINER_ID = new AtomicLong(1); - + private static OzoneClient ozoneClient; @BeforeAll public static void setUp() throws Exception { ExitUtils.disableSystemExit(); conf = new OzoneConfiguration(); - dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); conf.setLong(OZONE_KEY_PREALLOCATION_BLOCKS_MAX, 10); @@ -184,6 +183,7 @@ public static void setUp() throws Exception { OmTestManagers omTestManagers = new OmTestManagers(conf, mockScmBlockLocationProtocol, mockScmContainerClient); om = omTestManagers.getOzoneManager(); + ozoneClient = omTestManagers.getRpcClient(); metadataManager = omTestManagers.getMetadataManager(); rpcClient = new RpcClient(conf, null) { @@ -204,8 +204,8 @@ protected XceiverClientFactory 
createXceiverClientFactory( @AfterAll public static void cleanup() throws Exception { + ozoneClient.close(); om.stop(); - FileUtils.deleteDirectory(dir); } private static XceiverClientManager mockDataNodeClientFactory() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 4619af1baa2..eafa193ae2b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -338,36 +338,42 @@ public void testKeyOps() throws Exception { long initialNumKeyLookup = getLongCounter("NumKeyLookup", omMetrics); long initialNumKeyDeletes = getLongCounter("NumKeyDeletes", omMetrics); long initialNumKeyLists = getLongCounter("NumKeyLists", omMetrics); - long initialNumTrashKeyLists = getLongCounter("NumTrashKeyLists", omMetrics); long initialNumKeys = getLongCounter("NumKeys", omMetrics); long initialNumInitiateMultipartUploads = getLongCounter("NumInitiateMultipartUploads", omMetrics); + long initialNumGetObjectTagging = getLongCounter("NumGetObjectTagging", omMetrics); + long initialNumPutObjectTagging = getLongCounter("NumPutObjectTagging", omMetrics); + long initialNumDeleteObjectTagging = getLongCounter("NumDeleteObjectTagging", omMetrics); long initialEcKeyCreateTotal = getLongCounter("EcKeyCreateTotal", omMetrics); long initialNumKeyAllocateFails = getLongCounter("NumKeyAllocateFails", omMetrics); long initialNumKeyLookupFails = getLongCounter("NumKeyLookupFails", omMetrics); long initialNumKeyDeleteFails = getLongCounter("NumKeyDeleteFails", omMetrics); - long initialNumTrashKeyListFails = getLongCounter("NumTrashKeyListFails", omMetrics); long initialNumInitiateMultipartUploadFails = getLongCounter("NumInitiateMultipartUploadFails", omMetrics); long initialNumBlockAllocationFails = getLongCounter("NumBlockAllocationFails", omMetrics); long initialNumKeyListFails = getLongCounter("NumKeyListFails", omMetrics); long initialEcKeyCreateFailsTotal = getLongCounter("EcKeyCreateFailsTotal", omMetrics); + long initialNumGetObjectTaggingFails = getLongCounter("NumGetObjectTaggingFails", omMetrics); + long initialNumPutObjectTaggingFails = getLongCounter("NumPutObjectTaggingFails", omMetrics); + long initialNumDeleteObjectTaggingFails = getLongCounter("NumDeleteObjectTaggingFails", omMetrics); // see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); - doKeyOps(keyArgs); + doKeyOps(keyArgs); // This will perform 10 different operations on the key omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 8, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 10, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 1, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 1, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); assertEquals(initialNumKeyLists + 1, getLongCounter("NumKeyLists", omMetrics)); - assertEquals(initialNumTrashKeyLists + 1, getLongCounter("NumTrashKeyLists", omMetrics)); assertEquals(initialNumKeys,
getLongCounter("NumKeys", omMetrics)); assertEquals(initialNumInitiateMultipartUploads + 1, getLongCounter("NumInitiateMultipartUploads", omMetrics)); + assertEquals(initialNumGetObjectTagging + 1, getLongCounter("NumGetObjectTagging", omMetrics)); + assertEquals(initialNumPutObjectTagging + 1, getLongCounter("NumPutObjectTagging", omMetrics)); + assertEquals(initialNumDeleteObjectTagging + 1, getLongCounter("NumDeleteObjectTagging", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); @@ -409,8 +415,7 @@ public void testKeyOps() throws Exception { doThrow(exception).when(mockKm).lookupKey(any(), any(), any()); doThrow(exception).when(mockKm).listKeys( any(), any(), any(), any(), anyInt()); - doThrow(exception).when(mockKm).listTrash( - any(), any(), any(), any(), anyInt()); + doThrow(exception).when(mockKm).getObjectTagging(any(), any()); OmMetadataReader omMetadataReader = (OmMetadataReader) ozoneManager.getOmMetadataReader().get(); HddsWhiteboxTestUtils.setInternalState( @@ -426,22 +431,23 @@ public void testKeyOps() throws Exception { doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 31, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 37, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 6, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 3, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 4, getLongCounter("NumKeyDeletes", omMetrics)); assertEquals(initialNumKeyLists + 3, getLongCounter("NumKeyLists", omMetrics)); - assertEquals(initialNumTrashKeyLists + 3, getLongCounter("NumTrashKeyLists", omMetrics)); assertEquals(initialNumInitiateMultipartUploads + 3, getLongCounter("NumInitiateMultipartUploads", omMetrics)); assertEquals(initialNumKeyAllocateFails + 1, getLongCounter("NumKeyAllocateFails", omMetrics)); assertEquals(initialNumKeyLookupFails + 1, getLongCounter("NumKeyLookupFails", omMetrics)); assertEquals(initialNumKeyDeleteFails + 1, getLongCounter("NumKeyDeleteFails", omMetrics)); assertEquals(initialNumKeyListFails + 1, getLongCounter("NumKeyListFails", omMetrics)); - assertEquals(initialNumTrashKeyListFails + 1, getLongCounter("NumTrashKeyListFails", omMetrics)); assertEquals(initialNumInitiateMultipartUploadFails + 1, getLongCounter( "NumInitiateMultipartUploadFails", omMetrics)); assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumGetObjectTaggingFails + 1, getLongCounter("NumGetObjectTaggingFails", omMetrics)); + assertEquals(initialNumPutObjectTaggingFails + 1, getLongCounter("NumPutObjectTaggingFails", omMetrics)); + assertEquals(initialNumDeleteObjectTaggingFails + 1, getLongCounter("NumDeleteObjectTaggingFails", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); @@ -844,8 +850,17 @@ private void doKeyOps(OmKeyArgs keyArgs) { } try { - ozoneManager.listTrash(keyArgs.getVolumeName(), - keyArgs.getBucketName(), null, null, 0); + writeClient.putObjectTagging(keyArgs); + } catch (IOException ignored) { + } + + try { + writeClient.getObjectTagging(keyArgs); + } catch (IOException ignored) { + } + + try { + writeClient.deleteObjectTagging(keyArgs); } catch (IOException ignored) { } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index 6937c52c712..80b97f92275 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -20,9 +20,12 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdfs.LogVerificationAppender; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -33,17 +36,24 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider; import org.apache.hadoop.ozone.om.ha.OMHAMetrics; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.service.KeyDeletingService; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.security.UserGroupInformation; import org.apache.log4j.Logger; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; +import org.apache.ratis.client.RaftClient; import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.retry.RetryPolicies; +import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServerConfigKeys; import org.apache.ratis.util.TimeDuration; import org.junit.jupiter.api.AfterEach; @@ -52,7 +62,9 @@ import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; +import org.slf4j.LoggerFactory; +import java.io.IOException; import java.net.ConnectException; import java.util.HashMap; import java.util.Iterator; @@ -61,6 +73,7 @@ import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import java.util.concurrent.TimeUnit; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT; @@ -71,16 +84,17 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; /** * Ozone Manager HA tests that stop/restart one or more OM nodes. 
* @see TestOzoneManagerHAWithAllRunning */ -@Flaky("HDDS-11352") @TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class TestOzoneManagerHAWithStoppedNodes extends TestOzoneManagerHA { - + private static final org.slf4j.Logger LOG = LoggerFactory.getLogger( + TestOzoneManagerHAWithStoppedNodes.class); /** * After restarting OMs we need to wait * for a leader to be elected and ready. @@ -594,6 +608,97 @@ void testListVolumes() throws Exception { objectStore.listVolumesByUser(userName, prefix, "")); } + @Test + void testRetryCacheWithDownedOM() throws Exception { + // Create a volume, a bucket and a key + String userName = "user" + RandomStringUtils.randomNumeric(5); + String adminName = "admin" + RandomStringUtils.randomNumeric(5); + String volumeName = "volume" + RandomStringUtils.randomNumeric(5); + String bucketName = UUID.randomUUID().toString(); + String keyTo = UUID.randomUUID().toString(); + + VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() + .setOwner(userName) + .setAdmin(adminName) + .build(); + getObjectStore().createVolume(volumeName, createVolumeArgs); + OzoneVolume ozoneVolume = getObjectStore().getVolume(volumeName); + ozoneVolume.createBucket(bucketName); + OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); + String keyFrom = createKey(ozoneBucket); + + int callId = 10; + ClientId clientId = ClientId.randomId(); + MiniOzoneHAClusterImpl cluster = getCluster(); + OzoneManager omLeader = cluster.getOMLeader(); + + OzoneManagerProtocolProtos.KeyArgs keyArgs = + OzoneManagerProtocolProtos.KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyFrom) + .build(); + OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest + = OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder() + .setKeyArgs(keyArgs) + .setToKeyName(keyTo) + .build(); + OzoneManagerProtocolProtos.OMRequest omRequest = + OzoneManagerProtocolProtos.OMRequest.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey) + .setRenameKeyRequest(renameKeyRequest) + .setClientId(clientId.toString()) + .build(); + // Set up the current call so that OM Ratis Server doesn't complain. + Server.getCurCall().set(new Server.Call(callId, 0, null, null, + RPC.RpcKind.RPC_BUILTIN, clientId.toByteString().toByteArray())); + // Submit rename request to OM + OzoneManagerProtocolProtos.OMResponse omResponse = + omLeader.getOmServerProtocol().processRequest(omRequest); + assertTrue(omResponse.getSuccess()); + + // Make one of the follower OMs the leader, and shut down the current leader. + OzoneManager newLeader = cluster.getOzoneManagersList().stream().filter( + om -> !om.getOMNodeId().equals(omLeader.getOMNodeId())).findFirst().get(); + transferLeader(omLeader, newLeader); + cluster.shutdownOzoneManager(omLeader); + + // The rename has completed, so the source key should no longer exist + // and the destination key should exist. + OMException omException = assertThrows(OMException.class, + () -> ozoneBucket.getKey(keyFrom)); + assertEquals(omException.getResult(), OMException.ResultCodes.KEY_NOT_FOUND); + assertTrue(ozoneBucket.getKey(keyTo).isFile()); + + // Submit rename request to OM again. The request is cached so it will succeed.
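+ // (The repeated request reuses the same client id and call id, so the new leader should answer it
+ // from the Ratis retry cache, which is populated on every peer as log entries are applied, instead
+ // of re-executing the rename, which would otherwise fail with KEY_NOT_FOUND.)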
+ omResponse = newLeader.getOmServerProtocol().processRequest(omRequest); + assertTrue(omResponse.getSuccess()); + } + + private void transferLeader(OzoneManager omLeader, OzoneManager newLeader) throws IOException { + LOG.info("Transfer leadership from {}(raft id {}) to {}(raft id {})", + omLeader.getOMNodeId(), omLeader.getOmRatisServer().getRaftPeerId(), + newLeader.getOMNodeId(), newLeader.getOmRatisServer().getRaftPeerId()); + + final SupportedRpcType rpc = SupportedRpcType.GRPC; + final RaftProperties properties = RatisHelper.newRaftProperties(rpc); + + // Nothing is made configurable for now; this RaftClient is only used + // to send the leadership transfer admin request to the OM Ratis group. + RaftClient.Builder builder = RaftClient.newBuilder() + .setRaftGroup(omLeader.getOmRatisServer().getRaftGroup()) + .setLeaderId(null) + .setProperties(properties) + .setRetryPolicy( + RetryPolicies.retryUpToMaximumCountWithFixedSleep(120, + TimeDuration.valueOf(500, TimeUnit.MILLISECONDS))); + try (RaftClient raftClient = builder.build()) { + RaftClientReply reply = raftClient.admin().transferLeadership(newLeader.getOmRatisServer() + .getRaftPeerId(), 10 * 1000); + assertTrue(reply.isSuccess()); + } + } + private void validateVolumesList(Set expectedVolumes, Iterator volumeIterator) { int expectedCount = 0; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java index 72f1c3374b2..6c7cd89109e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; @@ -201,6 +202,7 @@ private void setupEnvironment(boolean aclEnabled, om.setScmTopologyClient(new ScmTopologyClient( new ScmBlockLocationTestingClient(null, null, 0))); om.setCertClient(new CertificateClientTestImpl(conf)); + om.setSecretKeyClient(new SecretKeyTestClient()); om.start(); // Get OM client diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java index 6f86fcba70e..1a2e61b8800 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.security.UserGroupInformation; @@ -45,11 +46,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import
java.util.UUID; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -68,6 +71,8 @@ public class TestRecursiveAclWithFSO { .createUserForTesting("user1", new String[] {"test1"}); private final UserGroupInformation user2 = UserGroupInformation .createUserForTesting("user2", new String[] {"test2"}); + private final UserGroupInformation user3 = UserGroupInformation + .createUserForTesting("user3", new String[] {"test3, test4"}); @BeforeEach public void init() throws Exception { @@ -213,6 +218,70 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { } } + @Test + public void testKeyDefaultACL() throws Exception { + String volumeName = "vol1"; + try (OzoneClient client = cluster.newClient()) { + ObjectStore objectStore = client.getObjectStore(); + objectStore.createVolume(volumeName); + addVolumeAcl(objectStore, volumeName, "world::a"); + + // Verify volume ACLs. This volume will have 2 default ACLs, plus the one added above + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME) + .setStoreType(OZONE).build(); + List acls = objectStore.getAcl(obj); + assertEquals(3, acls.size()); + assertEquals(adminUser.getShortUserName(), acls.get(0).getName()); + OzoneAclConfig aclConfig = cluster.getConf().getObject(OzoneAclConfig.class); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(adminUser.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + assertEquals("WORLD", acls.get(2).getName()); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(2).getAclList().toArray()); + } + + // set LoginUser as user3 + UserGroupInformation.setLoginUser(user3); + try (OzoneClient client = cluster.newClient()) { + ObjectStore objectStore = client.getObjectStore(); + OzoneVolume volume = objectStore.getVolume(volumeName); + BucketArgs omBucketArgs = + BucketArgs.newBuilder().setStorageType(StorageType.DISK).build(); + String bucketName = "bucket"; + volume.createBucket(bucketName, omBucketArgs); + OzoneBucket ozoneBucket = volume.getBucket(bucketName); + + // verify bucket default ACLs + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volume.getName()) + .setBucketName(ozoneBucket.getName()).setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(OZONE).build(); + List acls = objectStore.getAcl(obj); + assertEquals(2, acls.size()); + assertEquals(user3.getShortUserName(), acls.get(0).getName()); + OzoneAclConfig aclConfig = cluster.getConf().getObject(OzoneAclConfig.class); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(user3.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + + // verify key default ACLs + int length = 10; + byte[] input = new byte[length]; + Arrays.fill(input, (byte) 96); + String keyName =
UUID.randomUUID().toString(); + createKey(ozoneBucket, keyName, length, input); + obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volume.getName()) + .setBucketName(ozoneBucket.getName()).setKeyName(keyName) + .setResType(OzoneObj.ResourceType.KEY).setStoreType(OZONE).build(); + acls = objectStore.getAcl(obj); + assertEquals(2, acls.size()); + assertEquals(user3.getShortUserName(), acls.get(0).getName()); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(user3.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + } + } + private void removeAclsFromKey(ObjectStore objectStore, OzoneBucket ozoneBucket, String key) throws IOException { OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder().setKeyName(key) @@ -271,6 +340,16 @@ private void setVolumeAcl(ObjectStore objectStore, String volumeName, assertTrue(objectStore.setAcl(obj, OzoneAcl.parseAcls(aclString))); } + /** + * Helper function to add volume ACL. + */ + private void addVolumeAcl(ObjectStore objectStore, String volumeName, + String aclString) throws IOException { + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME).setStoreType(OZONE).build(); + assertTrue(objectStore.addAcl(obj, OzoneAcl.parseAcl(aclString))); + } + /** * Helper function to set bucket ACL. */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java index abc21ed4351..4bdc29d6146 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java @@ -65,7 +65,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeUnit; import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser; @@ -80,6 +79,7 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; @@ -105,6 +105,8 @@ public class TestRangerBGSyncService { @TempDir private Path folder; + @TempDir + private String path; private MultiTenantAccessController accessController; private OMRangerBGSyncService bgSync; @@ -180,8 +182,6 @@ public void setUp() throws IOException { // Run as alice, so that Server.getRemoteUser() won't return null. 
mockRemoteUser(ugiAlice); - String omID = UUID.randomUUID().toString(); - final String path = GenericTestUtils.getTempPath(omID); Path metaDirPath = Paths.get(path, "om-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); @@ -233,7 +233,7 @@ public void setUp() throws IOException { ozoneManager.getMetadataManager().getMetaTable().put( OzoneConsts.RANGER_OZONE_SERVICE_VERSION_KEY, String.valueOf(v)); return null; - }).when(omRatisServer).submitRequest(any(), any()); + }).when(omRatisServer).submitRequest(any(), any(), anyLong()); } catch (ServiceException e) { throw new RuntimeException(e); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index c123675565a..0d93436b0e5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -20,6 +20,7 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.HashMap; import java.util.List; import com.google.common.collect.Lists; @@ -100,6 +101,7 @@ import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.Iterator; +import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -125,8 +127,10 @@ import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse.CancelMessage.CANCEL_ALREADY_CANCELLED_JOB; @@ -182,17 +186,21 @@ public abstract class TestOmSnapshot { private final boolean forceFullSnapshotDiff; private final boolean disableNativeDiff; private final AtomicInteger counter; + private final boolean createLinkedBucket; + private final Map linkedBuckets = new HashMap<>(); public TestOmSnapshot(BucketLayout newBucketLayout, boolean newEnableFileSystemPaths, boolean forceFullSnapDiff, - boolean disableNativeDiff) + boolean disableNativeDiff, + boolean createLinkedBucket) throws Exception { this.enabledFileSystemPaths = newEnableFileSystemPaths; this.bucketLayout = newBucketLayout; this.forceFullSnapshotDiff = forceFullSnapDiff; this.disableNativeDiff = disableNativeDiff; this.counter = new AtomicInteger(); + this.createLinkedBucket = createLinkedBucket; init(); } @@ -204,11 +212,7 @@ private void init() throws Exception { conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); - 
conf.setBoolean(OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS, - disableNativeDiff); - conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); - conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); - conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); + conf.setBoolean(OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS, disableNativeDiff); conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); @@ -222,7 +226,10 @@ private void init() throws Exception { cluster.waitForClusterToBeReady(); client = cluster.newClient(); // create a volume and a bucket to be used by OzoneFileSystem - ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout); + ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket); + if (createLinkedBucket) { + this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket()); + } volumeName = ozoneBucket.getVolumeName(); bucketName = ozoneBucket.getName(); ozoneManager = cluster.getOzoneManager(); @@ -236,6 +243,17 @@ private void init() throws Exception { finalizeOMUpgrade(); } + private void createBucket(OzoneVolume volume, String bucketVal) throws IOException { + if (createLinkedBucket) { + String sourceBucketName = linkedBuckets.computeIfAbsent(bucketVal, (k) -> bucketVal + counter.incrementAndGet()); + volume.createBucket(sourceBucketName); + TestDataUtil.createLinkedBucket(client, volume.getName(), sourceBucketName, bucketVal); + this.linkedBuckets.put(bucketVal, sourceBucketName); + } else { + volume.createBucket(bucketVal); + } + } + private void stopKeyManager() throws IOException { KeyManagerImpl keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); @@ -323,10 +341,10 @@ public void testListKey() throws Exception { store.createVolume(volumeB); OzoneVolume volA = store.getVolume(volumeA); OzoneVolume volB = store.getVolume(volumeB); - volA.createBucket(bucketA); - volA.createBucket(bucketB); - volB.createBucket(bucketA); - volB.createBucket(bucketB); + createBucket(volA, bucketA); + createBucket(volA, bucketB); + createBucket(volB, bucketA); + createBucket(volB, bucketB); OzoneBucket volAbucketA = volA.getBucket(bucketA); OzoneBucket volAbucketB = volA.getBucket(bucketB); OzoneBucket volBbucketA = volB.getBucket(bucketA); @@ -405,7 +423,7 @@ public void testListKeyOnEmptyBucket() String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); - vol.createBucket(bucket); + createBucket(vol, bucket); String snapshotKeyPrefix = createSnapshot(volume, bucket); OzoneBucket buc = vol.getBucket(bucket); Iterator keys = buc.listKeys(snapshotKeyPrefix); @@ -482,7 +500,7 @@ public void testListDeleteKey() throws Exception { String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); - vol.createBucket(bucket); + createBucket(vol, bucket); OzoneBucket volBucket = vol.getBucket(bucket); String key = "key-"; @@ -507,7 +525,7 @@ public void testListAddNewKey() throws Exception { String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); - vol.createBucket(bucket); + createBucket(vol, bucket); OzoneBucket bucket1 = vol.getBucket(bucket); String key1 = "key-1-"; @@ -557,7 +575,7 @@ public void 
testCreateSnapshotMissingMandatoryParams() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -601,11 +619,11 @@ private Set getDeletedKeysFromRocksDb( private void getOmKeyInfo(String volume, String bucket, String key) throws IOException { ResolvedBucket resolvedBucket = new ResolvedBucket(volume, bucket, - volume, bucket, "", bucketLayout); + volume, this.linkedBuckets.getOrDefault(bucket, bucket), "", bucketLayout); cluster.getOzoneManager().getKeyManager() .getKeyInfo(new OmKeyArgs.Builder() .setVolumeName(volume) - .setBucketName(bucket) + .setBucketName(this.linkedBuckets.getOrDefault(bucket, bucket)) .setKeyName(key).build(), resolvedBucket, null); } @@ -625,7 +643,7 @@ public void testSnapDiffHandlingReclaimWithLatestUse() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -663,7 +681,7 @@ public void testSnapDiffHandlingReclaimWithPreviousUse() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -710,7 +728,7 @@ public void testSnapDiffReclaimWithKeyRecreation() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -764,7 +782,7 @@ public void testSnapDiffReclaimWithKeyRename() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -809,7 +827,7 @@ public void testSnapDiffWith2RenamesAndDelete() throws Exception { String testBucketName = "bucket" + counter.incrementAndGet(); store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -868,7 +886,7 @@ public void testSnapDiffWithKeyRenamesRecreationAndDelete() String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -912,7 +930,7 @@ public void testSnapDiffReclaimWithDeferredKeyDeletion() throws Exception { String 
testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; createSnapshot(testVolumeName, testBucketName, snap1); @@ -946,7 +964,7 @@ public void testSnapDiffWithNoEffectiveRename() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; String key1 = "k1"; @@ -988,7 +1006,7 @@ public void testSnapDiffWithDirectory() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; String key1 = "k1"; @@ -1046,7 +1064,7 @@ public void testSnapDiffWithDirectoryDelete() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; String key1 = "k1"; @@ -1082,7 +1100,7 @@ public void testSnapdiffWithObjectMetaModification() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; String key1 = "k1"; @@ -1114,7 +1132,7 @@ public void testSnapdiffWithFilesystemCreate() String testBucketName = "bucket" + counter.incrementAndGet(); store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, testBucketName, testVolumeName); try (FileSystem fs = FileSystem.get(new URI(rootPath), cluster.getConf())) { @@ -1157,7 +1175,7 @@ public void testSnapDiffWithFilesystemDirectoryRenameOperation() String testBucketName = "bucket" + counter.incrementAndGet(); store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, testBucketName, testVolumeName); try (FileSystem fs = FileSystem.get(new URI(rootPath), cluster.getConf())) { @@ -1200,7 +1218,7 @@ public void testSnapDiffWithFilesystemDirectoryMoveOperation() String testBucketName = "bucket" + counter.incrementAndGet(); store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, testBucketName, testVolumeName); try (FileSystem fs = FileSystem.get(new URI(rootPath), cluster.getConf())) { @@ -1243,8 +1261,8 @@ public void testBucketDeleteIfSnapshotExists() throws Exception { String bucket2 = "buc-" + counter.incrementAndGet(); 
store.createVolume(volume1); OzoneVolume volume = store.getVolume(volume1); - volume.createBucket(bucket1); - volume.createBucket(bucket2); + createBucket(volume, bucket1); + createBucket(volume, bucket2); OzoneBucket bucketWithSnapshot = volume.getBucket(bucket1); OzoneBucket bucketWithoutSnapshot = volume.getBucket(bucket2); String key = "key-"; @@ -1254,7 +1272,7 @@ public void testBucketDeleteIfSnapshotExists() throws Exception { deleteKeys(bucketWithSnapshot); deleteKeys(bucketWithoutSnapshot); OMException omException = assertThrows(OMException.class, - () -> volume.deleteBucket(bucket1)); + () -> volume.deleteBucket(linkedBuckets.getOrDefault(bucket1, bucket1))); assertEquals(CONTAINS_SNAPSHOT, omException.getResult()); // TODO: Delete snapshot then delete bucket1 when deletion is implemented // no exception for bucket without snapshot @@ -1267,7 +1285,7 @@ public void testGetSnapshotInfo() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); createFileKey(bucket1, "key-1"); @@ -1282,12 +1300,12 @@ public void testGetSnapshotInfo() throws Exception { assertEquals(snap1, snapshot1.getName()); assertEquals(volume, snapshot1.getVolumeName()); - assertEquals(bucket, snapshot1.getBucketName()); + assertEquals(linkedBuckets.getOrDefault(bucket, bucket), snapshot1.getBucketName()); OzoneSnapshot snapshot2 = store.getSnapshotInfo(volume, bucket, snap2); assertEquals(snap2, snapshot2.getName()); assertEquals(volume, snapshot2.getVolumeName()); - assertEquals(bucket, snapshot2.getBucketName()); + assertEquals(linkedBuckets.getOrDefault(bucket, bucket), snapshot2.getBucketName()); testGetSnapshotInfoFailure(null, bucket, "snapshotName", "volume can't be null or empty."); @@ -1296,9 +1314,10 @@ public void testGetSnapshotInfo() throws Exception { testGetSnapshotInfoFailure(volume, bucket, null, "snapshot name can't be null or empty."); testGetSnapshotInfoFailure(volume, bucket, "snapshotName", - "Snapshot '/" + volume + "/" + bucket + "/snapshotName' is not found."); + "Snapshot '/" + volume + "/" + linkedBuckets.getOrDefault(bucket, bucket) + + "/snapshotName' is not found."); testGetSnapshotInfoFailure(volume, "bucketName", "snapshotName", - "Snapshot '/" + volume + "/bucketName/snapshotName' is not found."); + "Bucket not found: " + volume + "/bucketName"); } public void testGetSnapshotInfoFailure(String volName, @@ -1317,7 +1336,7 @@ public void testSnapDiffWithDirRename() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); bucket1.createDirectory("dir1"); String snap1 = "snap1"; @@ -1339,7 +1358,7 @@ public void testSnapDiff() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1477,14 +1496,12 @@ public void testSnapDiffCancel() throws Exception { assertEquals(CANCELLED, response.getJobStatus()); String fromSnapshotTableKey = - SnapshotInfo.getTableKey(volumeName, bucketName, fromSnapName); + 
SnapshotInfo.getTableKey(volumeName, linkedBuckets.getOrDefault(bucketName, bucketName), fromSnapName); String toSnapshotTableKey = - SnapshotInfo.getTableKey(volumeName, bucketName, toSnapName); + SnapshotInfo.getTableKey(volumeName, linkedBuckets.getOrDefault(bucketName, bucketName), toSnapName); - UUID fromSnapshotID = ozoneManager.getOmSnapshotManager() - .getSnapshotInfo(fromSnapshotTableKey).getSnapshotId(); - UUID toSnapshotID = ozoneManager.getOmSnapshotManager() - .getSnapshotInfo(toSnapshotTableKey).getSnapshotId(); + UUID fromSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshotTableKey).getSnapshotId(); + UUID toSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, toSnapshotTableKey).getSnapshotId(); // Construct SnapshotDiffJob table key. String snapDiffJobKey = fromSnapshotID + DELIMITER + toSnapshotID; @@ -1573,7 +1590,7 @@ public void testSnapDiffNoSnapshot() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1586,13 +1603,13 @@ public void testSnapDiffNoSnapshot() throws Exception { OMException omException = assertThrows(OMException.class, () -> store.snapshotDiff(volume, bucket, snap1, snap2, null, 0, false, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(FILE_NOT_FOUND, omException.getResult()); // From snapshot is invalid omException = assertThrows(OMException.class, () -> store.snapshotDiff(volume, bucket, snap2, snap1, null, 0, false, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(FILE_NOT_FOUND, omException.getResult()); } @Test @@ -1605,7 +1622,7 @@ public void testSnapDiffNonExistentUrl() throws Exception { String bucketb = "buck-" + counter.incrementAndGet(); store.createVolume(volumea); OzoneVolume volume1 = store.getVolume(volumea); - volume1.createBucket(bucketa); + createBucket(volume1, bucketa); OzoneBucket bucket1 = volume1.getBucket(bucketa); // Create Key1 and take 2 snapshots String key1 = "key-1-"; @@ -1618,16 +1635,16 @@ public void testSnapDiffNonExistentUrl() throws Exception { OMException omException = assertThrows(OMException.class, () -> store.snapshotDiff(volumea, bucketb, snap1, snap2, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(BUCKET_NOT_FOUND, omException.getResult()); // Volume is nonexistent omException = assertThrows(OMException.class, () -> store.snapshotDiff(volumeb, bucketa, snap2, snap1, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(VOLUME_NOT_FOUND, omException.getResult()); omException = assertThrows(OMException.class, () -> store.snapshotDiff(volumeb, bucketb, snap2, snap1, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(VOLUME_NOT_FOUND, omException.getResult()); } /** @@ -1644,7 +1661,7 @@ public void testSnapDiffWithKeyOverwrite() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 
= createFileKeyWithPrefix(bucket, key1); @@ -1669,7 +1686,7 @@ public void testSnapDiffMissingMandatoryParams() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1683,12 +1700,12 @@ public void testSnapDiffMissingMandatoryParams() throws Exception { OMException omException = assertThrows(OMException.class, () -> store.snapshotDiff(volume, bucket, snap1, nullstr, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(FILE_NOT_FOUND, omException.getResult()); // From snapshot is empty omException = assertThrows(OMException.class, () -> store.snapshotDiff(volume, bucket, nullstr, snap1, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(FILE_NOT_FOUND, omException.getResult()); // Bucket is empty assertThrows(IllegalArgumentException.class, () -> store.snapshotDiff(volume, nullstr, snap1, snap2, @@ -1706,8 +1723,8 @@ public void testSnapDiffMultipleBuckets() throws Exception { String bucketName2 = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucketName1); - volume1.createBucket(bucketName2); + createBucket(volume1, bucketName1); + createBucket(volume1, bucketName2); OzoneBucket bucket1 = volume1.getBucket(bucketName1); OzoneBucket bucket2 = volume1.getBucket(bucketName2); // Create Key1 and take snapshot @@ -1732,19 +1749,18 @@ public void testListSnapshotDiffWithInvalidParameters() String volume = "vol-" + RandomStringUtils.randomNumeric(5); String bucket = "buck-" + RandomStringUtils.randomNumeric(5); - String volBucketErrorMessage = "Provided volume name " + volume + - " or bucket name " + bucket + " doesn't exist"; + String volErrorMessage = "Volume not found: " + volume; Exception volBucketEx = assertThrows(OMException.class, () -> store.listSnapshotDiffJobs(volume, bucket, "", true)); - assertEquals(volBucketErrorMessage, + assertEquals(volErrorMessage, volBucketEx.getMessage()); // Create the volume and the bucket. 
store.createVolume(volume); OzoneVolume ozVolume = store.getVolume(volume); - ozVolume.createBucket(bucket); + createBucket(ozVolume, bucket); assertDoesNotThrow(() -> store.listSnapshotDiffJobs(volume, bucket, "", true)); @@ -1791,8 +1807,8 @@ public void testSnapDiffWithMultipleSSTs() throws Exception { String bucketName2 = "buck2"; store.createVolume(volumeName1); OzoneVolume volume1 = store.getVolume(volumeName1); - volume1.createBucket(bucketName1); - volume1.createBucket(bucketName2); + createBucket(volume1, bucketName1); + createBucket(volume1, bucketName2); OzoneBucket bucket1 = volume1.getBucket(bucketName1); OzoneBucket bucket2 = volume1.getBucket(bucketName2); String keyPrefix = "key-"; @@ -1828,7 +1844,7 @@ public void testDeleteSnapshotTwice() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1849,7 +1865,7 @@ public void testDeleteSnapshotFailure() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1876,7 +1892,7 @@ public void testDeleteSnapshotMissingMandatoryParams() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1901,9 +1917,10 @@ public void testSnapshotQuotaHandling() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); - bucket1.setQuota(OzoneQuota.parseQuota("102400000", "500")); + OzoneBucket originalBucket1 = volume1.getBucket(linkedBuckets.getOrDefault(bucket, bucket)); + originalBucket1.setQuota(OzoneQuota.parseQuota("102400000", "500")); volume1.setQuota(OzoneQuota.parseQuota("204800000", "1000")); long volUsedNamespaceInitial = volume1.getUsedNamespace(); @@ -1979,7 +1996,7 @@ private String createSnapshot(String volName, String buckName, OmSnapshotManager.getSnapshotPrefix(snapshotName); SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager() .getSnapshotInfoTable() - .get(SnapshotInfo.getTableKey(volName, buckName, snapshotName)); + .get(SnapshotInfo.getTableKey(volName, linkedBuckets.getOrDefault(buckName, buckName), snapshotName)); String snapshotDirName = OmSnapshotManager.getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo) + OM_KEY_PREFIX + "CURRENT"; @@ -2188,7 +2205,7 @@ public void testDayWeekMonthSnapshotCreationAndExpiration() throws Exception { String bucketA = "buc-a-" + RandomStringUtils.randomNumeric(5); store.createVolume(volumeA); OzoneVolume volA = store.getVolume(volumeA); - volA.createBucket(bucketA); + createBucket(volA, bucketA); OzoneBucket volAbucketA = volA.getBucket(bucketA); int latestDayIndex = 0; @@ -2315,7 +2332,7 @@ private void checkDayWeekMonthSnapshotData(OzoneBucket ozoneBucketClient, // Validate keys 
metadata in active Ozone namespace OzoneKeyDetails ozoneKeyDetails = ozoneBucketClient.getKey(keyName); assertEquals(keyName, ozoneKeyDetails.getName()); - assertEquals(ozoneBucketClient.getName(), + assertEquals(linkedBuckets.getOrDefault(ozoneBucketClient.getName(), ozoneBucketClient.getName()), ozoneKeyDetails.getBucketName()); assertEquals(ozoneBucketClient.getVolumeName(), ozoneKeyDetails.getVolumeName()); @@ -2397,7 +2414,7 @@ public void testSnapshotCompactionDag() throws Exception { store.createVolume(volume1); OzoneVolume ozoneVolume = store.getVolume(volume1); - ozoneVolume.createBucket(bucket1); + createBucket(ozoneVolume, bucket1); OzoneBucket ozoneBucket1 = ozoneVolume.getBucket(bucket1); DBStore activeDbStore = ozoneManager.getMetadataManager().getStore(); @@ -2410,7 +2427,7 @@ public void testSnapshotCompactionDag() throws Exception { createSnapshot(volume1, bucket1, "bucket1-snap1"); activeDbStore.compactDB(); - ozoneVolume.createBucket(bucket2); + createBucket(ozoneVolume, bucket2); OzoneBucket ozoneBucket2 = ozoneVolume.getBucket(bucket2); for (int i = 100; i < 200; i++) { @@ -2423,7 +2440,7 @@ public void testSnapshotCompactionDag() throws Exception { createSnapshot(volume1, bucket2, "bucket2-snap1"); activeDbStore.compactDB(); - ozoneVolume.createBucket(bucket3); + createBucket(ozoneVolume, bucket3); OzoneBucket ozoneBucket3 = ozoneVolume.getBucket(bucket3); for (int i = 200; i < 300; i++) { @@ -2502,7 +2519,7 @@ public void testSnapshotReuseSnapName() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index 0849b900781..c43ec9c33c1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneSnapshot; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -54,9 +55,9 @@ import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,9 +71,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.TreeSet; import java.util.UUID; @@ -99,6 +102,7 @@ * Abstract class for OmSnapshot file system tests. 
*/ @Timeout(120) +@TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class TestOmSnapshotFileSystem { protected static final String VOLUME_NAME = "volume" + RandomStringUtils.randomNumeric(5); @@ -107,26 +111,29 @@ public abstract class TestOmSnapshotFileSystem { protected static final String BUCKET_NAME_LEGACY = "bucket-legacy-" + RandomStringUtils.randomNumeric(5); - private static MiniOzoneCluster cluster = null; - private static OzoneClient client; - private static ObjectStore objectStore; - private static OzoneConfiguration conf; - private static OzoneManagerProtocol writeClient; - private static OzoneManager ozoneManager; - private static String keyPrefix; + private MiniOzoneCluster cluster = null; + private OzoneClient client; + private ObjectStore objectStore; + private OzoneConfiguration conf; + private OzoneManagerProtocol writeClient; + private OzoneManager ozoneManager; + private String keyPrefix; private final String bucketName; + private final boolean createLinkedBuckets; private FileSystem fs; private OzoneFileSystem o3fs; + private Map linkedBucketMaps = new HashMap<>(); private static final Logger LOG = LoggerFactory.getLogger(TestOmSnapshot.class); - public TestOmSnapshotFileSystem(String bucketName) { + public TestOmSnapshotFileSystem(String bucketName, boolean createLinkedBuckets) throws Exception { this.bucketName = bucketName; + this.createLinkedBuckets = createLinkedBuckets; + init(); } - @BeforeAll - public static void init() throws Exception { + private void init() throws Exception { conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); cluster = MiniOzoneCluster.newBuilder(conf).build(); @@ -138,12 +145,20 @@ public static void init() throws Exception { ozoneManager = cluster.getOzoneManager(); TestDataUtil.createVolume(client, VOLUME_NAME); - TestDataUtil.createBucket(client, VOLUME_NAME, + OzoneBucket bucket = TestDataUtil.createBucket(client, VOLUME_NAME, new BucketArgs.Builder().setBucketLayout(FILE_SYSTEM_OPTIMIZED).build(), - BUCKET_NAME_FSO); - TestDataUtil.createBucket(client, VOLUME_NAME, + BUCKET_NAME_FSO, createLinkedBuckets); + if (createLinkedBuckets) { + linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket()); + } + bucket = TestDataUtil.createBucket(client, VOLUME_NAME, new BucketArgs.Builder().setBucketLayout(LEGACY).build(), - BUCKET_NAME_LEGACY); + BUCKET_NAME_LEGACY, createLinkedBuckets); + if (createLinkedBuckets) { + linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket()); + } + + // stop the deletion services so that keys can still be read KeyManagerImpl keyManager = (KeyManagerImpl) ozoneManager.getKeyManager(); @@ -163,7 +178,7 @@ public void setupFsClient() throws IOException { } @AfterAll - public static void tearDown() throws Exception { + void tearDown() { IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); @@ -273,7 +288,7 @@ public void testListKeysAtDifferentLevels() throws Exception { deleteSnapshot(snapshotName); String expectedMessage = String.format("Unable to load snapshot. 
" + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName); OMException exception = assertThrows(OMException.class, () -> ozoneBucket.listKeys(keyPrefix + "a/", null)); assertEquals(expectedMessage, exception.getMessage()); @@ -376,7 +391,7 @@ private void createKey(OzoneBucket ozoneBucket, String key, int length, assertEquals(inputString, new String(read, StandardCharsets.UTF_8)); } - private static void setKeyPrefix(String s) { + private void setKeyPrefix(String s) { keyPrefix = s; } @@ -493,21 +508,21 @@ public void testListStatus() throws Exception { () -> fs.listStatus(snapshotRoot1)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName1), exception1.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName1), exception1.getMessage()); deleteSnapshot(snapshotName2); FileNotFoundException exception2 = assertThrows(FileNotFoundException.class, () -> fs.listStatus(snapshotRoot2)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName2), exception2.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName2), exception2.getMessage()); deleteSnapshot(snapshotName3); FileNotFoundException exception3 = assertThrows(FileNotFoundException.class, () -> fs.listStatus(snapshotParent3)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName3), exception3.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName3), exception3.getMessage()); } @Test @@ -542,7 +557,7 @@ public void testListStatusWithIntermediateDir() throws Exception { () -> fs.listStatus(snapshotParent)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } @Test @@ -578,7 +593,7 @@ public void testGetFileStatus() throws Exception { () -> fs.listStatus(snapshotParent)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } @Test @@ -619,7 +634,7 @@ void testReadFileFromSnapshot() throws Exception { () -> fs.open(fileInSnapshot)); assertEquals(String.format("FILE_NOT_FOUND: Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } private void createAndCommitKey(String keyName) throws IOException { @@ -669,7 +684,7 @@ public void testListStatusOnRoot() throws Exception { () -> fs.listStatus(snapshotRoot)); assertEquals(String.format("Unable to load snapshot. 
" + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } /** @@ -726,7 +741,7 @@ public void testListStatusOnLargeDirectory() throws Exception { () -> fs.listStatus(snapshotRoot)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } private String createSnapshot(String snapshotName) @@ -736,9 +751,10 @@ private String createSnapshot(String snapshotName) writeClient.createSnapshot(VOLUME_NAME, bucketName, snapshotName); // wait till the snapshot directory exists + OzoneSnapshot snapshot = objectStore.getSnapshotInfo(VOLUME_NAME, bucketName, snapshotName); SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager() .getSnapshotInfoTable() - .get(SnapshotInfo.getTableKey(VOLUME_NAME, bucketName, snapshotName)); + .get(SnapshotInfo.getTableKey(snapshot.getVolumeName(), snapshot.getBucketName(), snapshotName)); String snapshotDirName = getSnapshotPath(conf, snapshotInfo) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java index 47bdd8f3bd5..17adf6cce72 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java @@ -25,7 +25,7 @@ */ @Timeout(120) public class TestOmSnapshotFileSystemFso extends TestOmSnapshotFileSystem { - TestOmSnapshotFileSystemFso() { - super(BUCKET_NAME_FSO); + TestOmSnapshotFileSystemFso() throws Exception { + super(BUCKET_NAME_FSO, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFsoWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFsoWithLinkedBuckets.java new file mode 100644 index 00000000000..e9d1017cddb --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFsoWithLinkedBuckets.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +/** + * OmSnapshot file system tests for FSO. + */ +@Timeout(120) +public class TestOmSnapshotFileSystemFsoWithLinkedBuckets extends TestOmSnapshotFileSystem { + TestOmSnapshotFileSystemFsoWithLinkedBuckets() throws Exception { + super(BUCKET_NAME_FSO, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java index b8d81c31cf5..effaaa5d4e7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java @@ -25,7 +25,7 @@ */ @Timeout(120) public class TestOmSnapshotFileSystemLegacy extends TestOmSnapshotFileSystem { - TestOmSnapshotFileSystemLegacy() { - super(BUCKET_NAME_LEGACY); + TestOmSnapshotFileSystemLegacy() throws Exception { + super(BUCKET_NAME_LEGACY, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacyWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacyWithLinkedBuckets.java new file mode 100644 index 00000000000..61f92cc7c0b --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacyWithLinkedBuckets.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +/** + * OmSnapshot file system tests for Legacy. 
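A note on the lifecycle switch made in TestOmSnapshotFileSystem earlier in this patch: @TestInstance(Lifecycle.PER_CLASS) is what allows the formerly static @BeforeAll/@AfterAll logic to become constructor-time setup and an instance-level tearDown(), so each concrete subclass can build its own cluster configuration. A minimal standalone shape of that pattern (class and field names are illustrative, not from the patch):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.MiniOzoneCluster;
    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.TestInstance;

    @TestInstance(TestInstance.Lifecycle.PER_CLASS)
    abstract class PerClassClusterTestSketch {
      private final MiniOzoneCluster cluster;

      // Constructor-time init lets each subclass choose its configuration
      // (e.g. whether to create linked buckets) before the cluster comes up.
      PerClassClusterTestSketch() throws Exception {
        cluster = MiniOzoneCluster.newBuilder(new OzoneConfiguration()).build();
        cluster.waitForClusterToBeReady();
      }

      @AfterAll
      void tearDown() {
        // A non-static @AfterAll is only legal with the PER_CLASS lifecycle.
        if (cluster != null) {
          cluster.shutdown();
        }
      }
    }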
+ */ +@Timeout(120) +public class TestOmSnapshotFileSystemLegacyWithLinkedBuckets extends TestOmSnapshotFileSystem { + TestOmSnapshotFileSystemLegacyWithLinkedBuckets() throws Exception { + super(BUCKET_NAME_LEGACY, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java index 06fbebb2efa..c303b24ad24 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java @@ -31,6 +31,6 @@ @Timeout(300) class TestOmSnapshotFsoWithNativeLib extends TestOmSnapshot { TestOmSnapshotFsoWithNativeLib() throws Exception { - super(FILE_SYSTEM_OPTIMIZED, false, false, false); + super(FILE_SYSTEM_OPTIMIZED, false, false, false, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java new file mode 100644 index 00000000000..c499a705649 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.ozone.test.tag.Native; +import org.junit.jupiter.api.Timeout; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; + +/** + * Test OmSnapshot for FSO bucket type when native lib is enabled. 
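The remaining new classes in this patch all follow the same one-line pattern: they forward an extra boolean that toggles linked-bucket creation, so every existing scenario now runs both with and without bucket links. A compacted, hypothetical variant spelling out the (assumed) meaning of each argument:

    package org.apache.hadoop.ozone.om.snapshot;

    import org.junit.jupiter.api.Timeout;

    import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;

    /** Hypothetical example variant; mirrors the shape of the classes added in this patch. */
    @Timeout(300)
    class TestOmSnapshotFsoLinkedExample extends TestOmSnapshot {
      TestOmSnapshotFsoLinkedExample() throws Exception {
        // (bucketLayout, enableAcl, forceFullSnapshotDiff, disableNativeDiff, createLinkedBuckets)
        // -- the names of the first four flags are assumptions based on the existing subclasses.
        super(FILE_SYSTEM_OPTIMIZED, false, false, false, true);
      }
    }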
+ */ +@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) +@Timeout(300) +class TestOmSnapshotFsoWithNativeLibWithLinkedBuckets extends TestOmSnapshot { + TestOmSnapshotFsoWithNativeLibWithLinkedBuckets() throws Exception { + super(FILE_SYSTEM_OPTIMIZED, false, false, false, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLib.java index c1782b73d19..26262916cb8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLib.java @@ -29,6 +29,6 @@ public class TestOmSnapshotFsoWithoutNativeLib extends TestOmSnapshot { public TestOmSnapshotFsoWithoutNativeLib() throws Exception { - super(FILE_SYSTEM_OPTIMIZED, false, false, true); + super(FILE_SYSTEM_OPTIMIZED, false, false, true, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets.java new file mode 100644 index 00000000000..4387f77b3fc --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; + +/** + * Test OmSnapshot for FSO bucket type when native lib is disabled. 
+ */ +@Timeout(300) +public class TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets extends TestOmSnapshot { + + public TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets() throws Exception { + super(FILE_SYSTEM_OPTIMIZED, false, false, true, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStore.java index 13c8cb5fca3..bad51103a55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStore.java @@ -29,6 +29,6 @@ public class TestOmSnapshotObjectStore extends TestOmSnapshot { public TestOmSnapshotObjectStore() throws Exception { - super(OBJECT_STORE, false, false, false); + super(OBJECT_STORE, false, false, false, true); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStoreWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStoreWithLinkedBuckets.java new file mode 100644 index 00000000000..64765e71718 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStoreWithLinkedBuckets.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.OBJECT_STORE; + +/** + * Test OmSnapshot for Object Store bucket type. 
+ */ +@Timeout(300) +public class TestOmSnapshotObjectStoreWithLinkedBuckets extends TestOmSnapshot { + + public TestOmSnapshotObjectStoreWithLinkedBuckets() throws Exception { + super(OBJECT_STORE, false, false, false, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithBucketLinkingLegacy.java similarity index 84% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLegacy.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithBucketLinkingLegacy.java index bf4a2fee0de..f1ced6c4a80 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLegacy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithBucketLinkingLegacy.java @@ -26,9 +26,9 @@ * Test OmSnapshot for Legacy bucket type. */ @Timeout(300) -public class TestOmSnapshotLegacy extends TestOmSnapshot { +public class TestOmSnapshotWithBucketLinkingLegacy extends TestOmSnapshot { - public TestOmSnapshotLegacy() throws Exception { - super(LEGACY, false, false, false); + public TestOmSnapshotWithBucketLinkingLegacy() throws Exception { + super(LEGACY, false, false, false, true); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithoutBucketLinkingLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithoutBucketLinkingLegacy.java new file mode 100644 index 00000000000..95549471e61 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithoutBucketLinkingLegacy.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY; + +/** + * Test OmSnapshot for Legacy bucket type. 
+ */ +@Timeout(300) +public class TestOmSnapshotWithoutBucketLinkingLegacy extends TestOmSnapshot { + + public TestOmSnapshotWithoutBucketLinkingLegacy() throws Exception { + super(LEGACY, false, false, false, false); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java index 341b5b78c60..f178d00daa7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java @@ -21,10 +21,12 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -34,20 +36,27 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS; import static org.apache.ozone.test.LambdaTestUtils.await; @@ -72,6 +81,8 @@ public class TestOzoneManagerHASnapshot { public static void staticInit() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setTimeDuration(OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test") @@ -265,4 +276,97 @@ private void createFileKey(OzoneBucket bucket, String keyName) fileKey.write(value); } } + + /** + * This is to simulate HDDS-11152 scenario. In which a follower's doubleBuffer is lagging and accumulates purgeKey + * and purgeSnapshot in same batch. 
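The HDDS-11152 regression test that follows traps deletions by suspending the key- and snapshot-deleting services, stops the follower's double buffer, and then resumes each service and waits until its run counter advances. The wait step is identical for both services; a small standalone helper capturing it (the helper class is illustrative, the GenericTestUtils call is the one used in the test):

    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.ozone.test.GenericTestUtils;

    final class ServiceRunBarrier {
      private ServiceRunBarrier() {
      }

      /**
       * Blocks until the given background-service run counter increases, i.e. the
       * service has completed at least one more iteration after being resumed.
       */
      static void awaitOneMoreRun(AtomicLong runCount)
          throws InterruptedException, TimeoutException {
        long before = runCount.get();
        GenericTestUtils.waitFor(() -> runCount.get() > before, 1000, 60000);
      }
    }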
+ */ + @Test + public void testKeyAndSnapshotDeletionService() throws IOException, InterruptedException, TimeoutException { + OzoneManager omLeader = cluster.getOMLeader(); + OzoneManager omFollower; + + if (omLeader != cluster.getOzoneManager(0)) { + omFollower = cluster.getOzoneManager(0); + } else { + omFollower = cluster.getOzoneManager(1); + } + + int numKeys = 5; + List keys = new ArrayList<>(); + for (int i = 0; i < numKeys; i++) { + String keyName = "key-" + RandomStringUtils.randomNumeric(10); + createFileKey(ozoneBucket, keyName); + keys.add(keyName); + } + + // Stop the key deletion service so that deleted keys get trapped in the snapshots. + omLeader.getKeyManager().getDeletingService().suspend(); + // Stop the snapshot deletion service so that deleted keys get trapped in the snapshots. + omLeader.getKeyManager().getSnapshotDeletingService().suspend(); + + // Delete half of the keys + for (int i = 0; i < numKeys / 2; i++) { + ozoneBucket.deleteKey(keys.get(i)); + } + + String snapshotName = "snap-" + RandomStringUtils.randomNumeric(10); + createSnapshot(volumeName, bucketName, snapshotName); + + store.deleteSnapshot(volumeName, bucketName, snapshotName); + + // Pause double buffer on follower node to accumulate all the key purge, snapshot delete and purge transactions. + omFollower.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer().stopDaemon(); + + long keyDeleteServiceCount = omLeader.getKeyManager().getDeletingService().getRunCount().get(); + omLeader.getKeyManager().getDeletingService().resume(); + + GenericTestUtils.waitFor( + () -> omLeader.getKeyManager().getDeletingService().getRunCount().get() > keyDeleteServiceCount, + 1000, 60000); + + long snapshotDeleteServiceCount = omLeader.getKeyManager().getSnapshotDeletingService().getRunCount().get(); + omLeader.getKeyManager().getSnapshotDeletingService().resume(); + + GenericTestUtils.waitFor( + () -> omLeader.getKeyManager().getSnapshotDeletingService().getRunCount().get() > snapshotDeleteServiceCount, + 1000, 60000); + + String tableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + checkSnapshotIsPurgedFromDB(omLeader, tableKey); + + // Resume the DoubleBuffer and flush the pending transactions. 
+ OzoneManagerDoubleBuffer omDoubleBuffer = + omFollower.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer(); + omDoubleBuffer.resume(); + CompletableFuture.supplyAsync(() -> { + omDoubleBuffer.flushTransactions(); + return null; + }); + omDoubleBuffer.awaitFlush(); + checkSnapshotIsPurgedFromDB(omFollower, tableKey); + } + + private void createSnapshot(String volName, String buckName, String snapName) throws IOException { + store.createSnapshot(volName, buckName, snapName); + + String tableKey = SnapshotInfo.getTableKey(volName, buckName, snapName); + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(cluster.getOMLeader(), tableKey); + String fileName = getSnapshotPath(cluster.getOMLeader().getConfiguration(), snapshotInfo); + File snapshotDir = new File(fileName); + if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { + throw new IOException("Snapshot directory doesn't exist"); + } + } + + private void checkSnapshotIsPurgedFromDB(OzoneManager ozoneManager, String snapshotTableKey) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(() -> { + try { + return ozoneManager.getMetadataManager().getSnapshotInfoTable().get(snapshotTableKey) == null; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 60000); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java index cc5bca4d310..cb2d5cba3e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java @@ -51,6 +51,7 @@ import org.apache.ozone.rocksdiff.CompactionNode; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -377,6 +378,7 @@ private OzoneManager getLeaderOM() { @Test @DisplayName("testCompactionLogBackgroundService") + @Flaky("HDDS-11672") public void testCompactionLogBackgroundService() throws IOException, InterruptedException, TimeoutException { OzoneManager leaderOM = getLeaderOM(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java similarity index 52% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java index be4ea69095b..c3a58a1a211 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.snapshot; +import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -32,20 +33,26 @@ import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.service.DirectoryDeletingService; +import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; import org.junit.jupiter.api.Order; @@ -53,25 +60,41 @@ import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.Random; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; /** * Test Snapshot Deleting Service. 
@@ -80,10 +103,10 @@ @Timeout(300) @TestInstance(TestInstance.Lifecycle.PER_CLASS) @TestMethodOrder(OrderAnnotation.class) -public class TestSnapshotDeletingService { +public class TestSnapshotDeletingServiceIntegrationTest { private static final Logger LOG = - LoggerFactory.getLogger(TestSnapshotDeletingService.class); + LoggerFactory.getLogger(TestSnapshotDeletingServiceIntegrationTest.class); private static boolean omRatisEnabled = true; private static final ByteBuffer CONTENT = ByteBuffer.allocate(1024 * 1024 * 16); @@ -108,6 +131,7 @@ public void setup() throws Exception { 1, StorageUnit.MB); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS); + conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT, 10000, TimeUnit.MILLISECONDS); conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 500); @@ -147,7 +171,7 @@ public void testSnapshotSplitAndMove() throws Exception { Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); - createSnapshotDataForBucket1(); + createSnapshotDataForBucket(bucket1); assertTableRowCount(snapshotInfoTable, 2); GenericTestUtils.waitFor(() -> snapshotDeletingService @@ -174,7 +198,7 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { om.getMetadataManager().getSnapshotInfoTable(); runIndividualTest = false; - createSnapshotDataForBucket1(); + createSnapshotDataForBucket(bucket1); BucketArgs bucketArgs = new BucketArgs.Builder() .setBucketLayout(BucketLayout.LEGACY) @@ -425,7 +449,7 @@ public void testSnapshotWithFSO() throws Exception { while (iterator.hasNext()) { Table.KeyValue next = iterator.next(); String activeDBDeletedKey = next.getKey(); - if (activeDBDeletedKey.matches(".*/key1.*")) { + if (activeDBDeletedKey.matches(".*/key1/.*")) { RepeatedOmKeyInfo activeDBDeleted = next.getValue(); OMMetadataManager metadataManager = cluster.getOzoneManager().getMetadataManager(); @@ -454,6 +478,229 @@ public void testSnapshotWithFSO() throws Exception { rcSnap1.close(); } + private DirectoryDeletingService getMockedDirectoryDeletingService(AtomicBoolean dirDeletionWaitStarted, + AtomicBoolean dirDeletionStarted) + throws InterruptedException, TimeoutException, IOException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getDirDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getDirDeletingService().getThreadCount() == 0, 1000, + 100000); + DirectoryDeletingService directoryDeletingService = Mockito.spy(new DirectoryDeletingService(10000, + TimeUnit.MILLISECONDS, 100000, ozoneManager, cluster.getConf(), 1)); + directoryDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> directoryDeletingService.getThreadCount() == 0, 1000, + 100000); + doAnswer(i -> { + // Wait for SDS to reach DDS wait block before processing any deleted directories. 
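getMockedDirectoryDeletingService() and getMockedKeyDeletingService() below rely on the same trick: spy the real service and use doAnswer to hold its entry point until another thread signals readiness, record that processing started, then fall through to the real method. The trick in isolation, on a trivial class (all names here are illustrative; the real test blocks with GenericTestUtils.waitFor instead of a sleep loop):

    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.spy;

    import java.util.concurrent.atomic.AtomicBoolean;

    class GatedSpyExample {
      static class Worker {
        int process() {
          return 42;
        }
      }

      static int gatedProcess() throws Exception {
        AtomicBoolean gateOpen = new AtomicBoolean(false);
        AtomicBoolean entered = new AtomicBoolean(false);

        Worker worker = spy(new Worker());
        doAnswer(invocation -> {
          // Block until the coordinating thread opens the gate.
          while (!gateOpen.get()) {
            Thread.sleep(10);
          }
          entered.set(true);                  // signal that processing has begun
          return invocation.callRealMethod(); // then run the real logic
        }).when(worker).process();

        gateOpen.set(true);             // in the real test, another service flips this flag
        return worker.process();        // runs the answer above, then the real process()
      }
    }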
+ GenericTestUtils.waitFor(dirDeletionWaitStarted::get, 1000, 100000); + dirDeletionStarted.set(true); + return i.callRealMethod(); + }).when(directoryDeletingService).getPendingDeletedDirInfo(); + return directoryDeletingService; + } + + private KeyDeletingService getMockedKeyDeletingService(AtomicBoolean keyDeletionWaitStarted, + AtomicBoolean keyDeletionStarted) + throws InterruptedException, TimeoutException, IOException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getDeletingService().getThreadCount() == 0, 1000, + 100000); + KeyManager keyManager = Mockito.spy(om.getKeyManager()); + when(ozoneManager.getKeyManager()).thenReturn(keyManager); + KeyDeletingService keyDeletingService = Mockito.spy(new KeyDeletingService(ozoneManager, + ozoneManager.getScmClient().getBlockClient(), keyManager, 10000, + 100000, cluster.getConf(), false)); + keyDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> keyDeletingService.getThreadCount() == 0, 1000, + 100000); + when(keyManager.getPendingDeletionKeys(anyInt())).thenAnswer(i -> { + // wait for SDS to reach the KDS wait block before processing any key. + GenericTestUtils.waitFor(keyDeletionWaitStarted::get, 1000, 100000); + keyDeletionStarted.set(true); + return i.callRealMethod(); + }); + return keyDeletingService; + } + + @SuppressWarnings("checkstyle:parameternumber") + private SnapshotDeletingService getMockedSnapshotDeletingService(KeyDeletingService keyDeletingService, + DirectoryDeletingService directoryDeletingService, + AtomicBoolean snapshotDeletionStarted, + AtomicBoolean keyDeletionWaitStarted, + AtomicBoolean dirDeletionWaitStarted, + AtomicBoolean keyDeletionStarted, + AtomicBoolean dirDeletionStarted, + OzoneBucket testBucket) + throws InterruptedException, TimeoutException, IOException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getSnapshotDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getSnapshotDeletingService().getThreadCount() == 0, 1000, + 100000); + KeyManager keyManager = Mockito.spy(om.getKeyManager()); + OmMetadataManagerImpl omMetadataManager = Mockito.spy((OmMetadataManagerImpl)om.getMetadataManager()); + SnapshotChainManager unMockedSnapshotChainManager = + ((OmMetadataManagerImpl)om.getMetadataManager()).getSnapshotChainManager(); + SnapshotChainManager snapshotChainManager = Mockito.spy(unMockedSnapshotChainManager); + OmSnapshotManager omSnapshotManager = Mockito.spy(om.getOmSnapshotManager()); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + when(ozoneManager.getKeyManager()).thenReturn(keyManager); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(omMetadataManager.getSnapshotChainManager()).thenReturn(snapshotChainManager); + when(keyManager.getDeletingService()).thenReturn(keyDeletingService); + when(keyManager.getDirDeletingService()).thenReturn(directoryDeletingService); + SnapshotDeletingService snapshotDeletingService = Mockito.spy(new SnapshotDeletingService(10000, + 100000, ozoneManager)); + snapshotDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> snapshotDeletingService.getThreadCount() == 0, 1000, + 100000); + when(snapshotChainManager.iterator(anyBoolean())).thenAnswer(i -> { + Iterator itr = (Iterator) i.callRealMethod(); + return Lists.newArrayList(itr).stream().filter(uuid -> { + try { + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(om, 
snapshotChainManager, uuid); + return snapshotInfo.getBucketName().equals(testBucket.getName()) && + snapshotInfo.getVolumeName().equals(testBucket.getVolumeName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).iterator(); + }); + when(snapshotChainManager.getLatestGlobalSnapshotId()) + .thenAnswer(i -> unMockedSnapshotChainManager.getLatestGlobalSnapshotId()); + when(snapshotChainManager.getOldestGlobalSnapshotId()) + .thenAnswer(i -> unMockedSnapshotChainManager.getOldestGlobalSnapshotId()); + doAnswer(i -> { + // KDS wait block reached in SDS. + GenericTestUtils.waitFor(() -> { + return keyDeletingService.isRunningOnAOS(); + }, 1000, 100000); + keyDeletionWaitStarted.set(true); + return i.callRealMethod(); + }).when(snapshotDeletingService).waitForKeyDeletingService(); + doAnswer(i -> { + // DDS wait block reached in SDS. + GenericTestUtils.waitFor(directoryDeletingService::isRunningOnAOS, 1000, 100000); + dirDeletionWaitStarted.set(true); + return i.callRealMethod(); + }).when(snapshotDeletingService).waitForDirDeletingService(); + doAnswer(i -> { + // Assert KDS & DDS is not running when SDS starts moving entries & assert all wait block, KDS processing + // AOS block & DDS AOS block have been executed. + Assertions.assertTrue(keyDeletionWaitStarted.get()); + Assertions.assertTrue(dirDeletionWaitStarted.get()); + Assertions.assertTrue(keyDeletionStarted.get()); + Assertions.assertTrue(dirDeletionStarted.get()); + Assertions.assertFalse(keyDeletingService.isRunningOnAOS()); + Assertions.assertFalse(directoryDeletingService.isRunningOnAOS()); + snapshotDeletionStarted.set(true); + return i.callRealMethod(); + }).when(omSnapshotManager).getSnapshot(anyString(), anyString(), anyString()); + return snapshotDeletingService; + } + + @Test + @Order(4) + @Flaky("HDDS-11847") + public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exception { + AtomicBoolean keyDeletionWaitStarted = new AtomicBoolean(false); + AtomicBoolean dirDeletionWaitStarted = new AtomicBoolean(false); + AtomicBoolean keyDeletionStarted = new AtomicBoolean(false); + AtomicBoolean dirDeletionStarted = new AtomicBoolean(false); + AtomicBoolean snapshotDeletionStarted = new AtomicBoolean(false); + Random random = new Random(); + String bucketName = "bucket" + random.nextInt(); + BucketArgs bucketArgs = new BucketArgs.Builder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .build(); + OzoneBucket testBucket = TestDataUtil.createBucket( + client, VOLUME_NAME, bucketArgs, bucketName); + // mock keyDeletingService + KeyDeletingService keyDeletingService = getMockedKeyDeletingService(keyDeletionWaitStarted, keyDeletionStarted); + + // mock dirDeletingService + DirectoryDeletingService directoryDeletingService = getMockedDirectoryDeletingService(dirDeletionWaitStarted, + dirDeletionStarted); + + // mock snapshotDeletingService. 
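The mocked SnapshotChainManager above restricts the global snapshot chain to the bucket under test by materializing the iterator, filtering it, and handing back a fresh iterator. The same idiom without Mockito, for reference (class and method names are illustrative):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;
    import java.util.UUID;
    import java.util.function.Predicate;

    final class SnapshotChainFilter {
      private SnapshotChainFilter() {
      }

      /** Keeps only the snapshot IDs accepted by the predicate, preserving chain order. */
      static Iterator<UUID> filter(Iterator<UUID> chain, Predicate<UUID> belongsToTestBucket) {
        List<UUID> kept = new ArrayList<>();
        chain.forEachRemaining(id -> {
          if (belongsToTestBucket.test(id)) {
            kept.add(id);
          }
        });
        return kept.iterator();
      }
    }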
+ SnapshotDeletingService snapshotDeletingService = getMockedSnapshotDeletingService(keyDeletingService, + directoryDeletingService, snapshotDeletionStarted, keyDeletionWaitStarted, dirDeletionWaitStarted, + keyDeletionStarted, dirDeletionStarted, testBucket); + createSnapshotFSODataForBucket(testBucket); + List> renamesKeyEntries; + List>> deletedKeyEntries; + List> deletedDirEntries; + try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")) { + renamesKeyEntries = snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + deletedKeyEntries = snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + deletedDirEntries = snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000); + } + Thread keyDeletingThread = new Thread(() -> { + try { + keyDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Thread directoryDeletingThread = new Thread(() -> { + try { + directoryDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + ExecutorService snapshotDeletingThread = Executors.newFixedThreadPool(1); + Runnable snapshotDeletionRunnable = () -> { + try { + snapshotDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + keyDeletingThread.start(); + directoryDeletingThread.start(); + Future future = snapshotDeletingThread.submit(snapshotDeletionRunnable); + GenericTestUtils.waitFor(snapshotDeletionStarted::get, 1000, 30000); + future.get(); + try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")) { + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000)); + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000)); + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000)); + } + List> aosRenamesKeyEntries = + om.getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + List>> aosDeletedKeyEntries = + om.getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + List> aosDeletedDirEntries = + om.getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000); + renamesKeyEntries.forEach(entry -> Assertions.assertTrue(aosRenamesKeyEntries.contains(entry))); + deletedKeyEntries.forEach(entry -> Assertions.assertTrue(aosDeletedKeyEntries.contains(entry))); + deletedDirEntries.forEach(entry -> Assertions.assertTrue(aosDeletedDirEntries.contains(entry))); + Mockito.reset(snapshotDeletingService); + SnapshotInfo snap2 = SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2"); + Assertions.assertEquals(snap2.getSnapshotStatus(), SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED); + future = snapshotDeletingThread.submit(snapshotDeletionRunnable); + future.get(); + 
Assertions.assertThrows(IOException.class, () -> SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")); + cluster.restartOzoneManager(); + } + /* Flow ---- @@ -472,7 +719,7 @@ public void testSnapshotWithFSO() throws Exception { create snapshot3 delete snapshot2 */ - private void createSnapshotDataForBucket1() throws Exception { + private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws Exception { Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); Table deletedTable = @@ -482,70 +729,147 @@ private void createSnapshotDataForBucket1() throws Exception { OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) om.getMetadataManager(); - TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key1", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key1", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); assertTableRowCount(keyTable, 2); // Create Snapshot 1. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap1"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap1"); assertTableRowCount(snapshotInfoTable, 1); // Overwrite bucket1key0, This is a newer version of the key which should // reclaimed as this is a different version of the key. - TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key2", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key2", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); // Key 1 cannot be reclaimed as it is still referenced by Snapshot 1. - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key1", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key1", false); // Key 2 is deleted here, which will be reclaimed here as // it is not being referenced by previous snapshot. - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key2", false); - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key0", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key2", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key0", false); assertTableRowCount(keyTable, 0); // one copy of bucket1key0 should also be reclaimed as it not same // but original deleted key created during overwrite should not be deleted assertTableRowCount(deletedTable, 2); // Create Snapshot 2. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap2"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); assertTableRowCount(snapshotInfoTable, 2); // Key 2 is removed from the active Db's // deletedTable when Snapshot 2 is taken. 
assertTableRowCount(deletedTable, 0); - TestDataUtil.createKey(bucket1, "bucket1key3", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key3", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key4", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key4", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key4", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key4", false); assertTableRowCount(keyTable, 1); assertTableRowCount(deletedTable, 0); // Create Snapshot 3. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap3"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap3"); assertTableRowCount(snapshotInfoTable, 3); SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable() - .get("/vol1/bucket1/bucket1snap2"); + .get(String.format("/%s/%s/%ssnap2", bucket.getVolumeName(), bucket.getName(), bucket.getName())); // Delete Snapshot 2. - client.getProxy().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap2"); + client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); assertTableRowCount(snapshotInfoTable, 2); - verifySnapshotChain(snapshotInfo, "/vol1/bucket1/bucket1snap3"); + verifySnapshotChain(snapshotInfo, String.format("/%s/%s/%ssnap3", bucket.getVolumeName(), bucket.getName(), + bucket.getName())); } + + /* + Flow + ---- + create dir0/key0 + create dir1/key1 + overwrite dir0/key0 + create dir2/key2 + create snap1 + rename dir1/key1 -> dir1/key10 + delete dir1/key10 + delete dir2 + create snap2 + delete snap2 + */ + private synchronized void createSnapshotFSODataForBucket(OzoneBucket bucket) throws Exception { + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); + Table deletedTable = + om.getMetadataManager().getDeletedTable(); + Table deletedDirTable = + om.getMetadataManager().getDeletedDirTable(); + Table keyTable = + om.getMetadataManager().getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + Table dirTable = + om.getMetadataManager().getDirectoryTable(); + Table renameTable = om.getMetadataManager().getSnapshotRenamedTable(); + OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) + om.getMetadataManager(); + Map countMap = + metadataManager.listTables().entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> { + try { + return (int)metadataManager.countRowsInTable(e.getValue()); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + })); + TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + TestDataUtil.createKey(bucket, "dir1/" + bucket.getName() + "key1", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 2); + assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 2); + + // Overwrite bucket1key0, This is a newer version of the key which should + // reclaimed as this is a different version of the key. 
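createSnapshotFSODataForBucket() above first records a baseline row count for every OM table (the countMap stream) and then asserts deltas against that baseline, so the checks stay valid even when other buckets have already populated the tables. The core of that pattern, reduced to a standalone sketch (names are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    final class TableRowDeltas {
      private final Map<String, Integer> baseline = new HashMap<>();

      /** Record the current row count of each table before the test writes anything. */
      void recordBaseline(Map<String, Integer> currentCounts) {
        baseline.putAll(currentCounts);
      }

      /** assertTableRowCount(table, baseline + delta) boils down to this comparison. */
      boolean grewBy(String tableName, int expectedDelta, int currentCount) {
        return currentCount == baseline.getOrDefault(tableName, 0) + expectedDelta;
      }
    }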
+ TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + TestDataUtil.createKey(bucket, "dir2/" + bucket.getName() + "key2", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 3); + assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 3); + assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1); + // create snap1 + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap1"); + bucket.renameKey("dir1/" + bucket.getName() + "key1", "dir1/" + bucket.getName() + "key10"); + bucket.renameKey("dir1/", "dir10/"); + assertTableRowCount(renameTable, countMap.get(renameTable.getName()) + 2); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + "dir10/" + bucket.getName() + "key10", false); + assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1); + // Key 2 is deleted here, which will be reclaimed here as + // it is not being referenced by previous snapshot. + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), "dir2", true); + assertTableRowCount(deletedDirTable, countMap.get(deletedDirTable.getName()) + 1); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); + // Delete Snapshot 2. + client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); + assertTableRowCount(snapshotInfoTable, countMap.get(snapshotInfoTable.getName()) + 2); + } + + private void verifySnapshotChain(SnapshotInfo deletedSnapshot, String nextSnapshot) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 03df331087b..3be0725a009 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -79,6 +80,7 @@ public class TestSnapshotDirectoryCleaningService { public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, 2500); + conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 2500, TimeUnit.MILLISECONDS); conf.setBoolean(OZONE_ACL_ENABLED, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java index dff4cd046c9..3172838ab50 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java @@ -29,8 +29,8 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.segmentparser.OMRatisLogParser; -import org.apache.hadoop.ozone.segmentparser.SCMRatisLogParser; +import org.apache.hadoop.ozone.debug.segmentparser.OMRatisLogParser; +import org.apache.hadoop.ozone.debug.segmentparser.SCMRatisLogParser; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java index 66be107ebf6..275993d1362 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -126,6 +127,7 @@ public static void testSnapshot(MiniOzoneCluster cluster) throws Exception { } @Test + @Flaky("HDDS-11645") public void testExplicitRemovalOfNode() throws Exception { ReconNodeManager nodeManager = (ReconNodeManager) ozoneCluster.getReconServer() .getReconStorageContainerManager().getScmNodeManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index cba7311b3b4..4476cbc3e38 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -19,6 +19,7 @@ import java.time.Duration; import java.util.List; +import java.util.Map; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -51,6 +52,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode; +import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_COUNT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -236,6 +238,8 @@ public void testEmptyMissingContainerDownNode() throws Exception { // Bring down the Datanode that had the container replica. cluster.shutdownHddsDatanode(pipeline.getFirstNode()); + // Since we no longer add EMPTY_MISSING containers to the table, we should + // have zero EMPTY_MISSING containers in the DB but their information will be logged. 
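The TestReconTasks changes above encode the new Recon behaviour: EMPTY_MISSING containers are no longer written to the unhealthy-containers table, only tallied in the container health task's in-memory stats map. The predicate the test repeats three times can be read as the sketch below (the helper is illustrative; the stats-map key is passed in rather than hard-coded, since its concrete value comes from ReconConstants.CONTAINER_COUNT):

    import java.util.List;
    import java.util.Map;

    final class EmptyMissingAssertion {
      private EmptyMissingAssertion() {
      }

      /**
       * True when no EMPTY_MISSING rows were persisted but the health task's
       * stats map reports the expected number of such containers.
       */
      static boolean emptyMissingOnlyLogged(List<?> persistedEmptyMissingRows,
          Map<String, Long> emptyMissingStats, String containerCountKey, long expectedCount) {
        return persistedEmptyMissingRows.isEmpty()
            && emptyMissingStats.getOrDefault(containerCountKey, 0L) == expectedCount;
      }
    }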
LambdaTestUtils.await(25000, 1000, () -> { List allEmptyMissingContainers = reconContainerManager.getContainerSchemaManager() @@ -243,9 +247,18 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. EMPTY_MISSING, 0, 1000); - return (allEmptyMissingContainers.size() == 1); - }); + // Check if EMPTY_MISSING containers are not added to the DB and their count is logged + Map> + unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() + .getUnhealthyContainerStateStatsMap(); + + // Return true if the size of the fetched containers is 0 and the log shows 1 for EMPTY_MISSING state + return allEmptyMissingContainers.size() == 0 && + unhealthyContainerStateStatsMap.get( + ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) + .getOrDefault(CONTAINER_COUNT, 0L) == 1; + }); // Now add a container to key mapping count as 3. This data is used to // identify if container is empty in terms of keys mapped to container. @@ -272,7 +285,17 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. EMPTY_MISSING, 0, 1000); - return (allEmptyMissingContainers.isEmpty()); + + + Map> + unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() + .getUnhealthyContainerStateStatsMap(); + + // Return true if the size of the fetched containers is 0 and the log shows 0 for EMPTY_MISSING state + return allEmptyMissingContainers.size() == 0 && + unhealthyContainerStateStatsMap.get( + ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) + .getOrDefault(CONTAINER_COUNT, 0L) == 0; }); // Now remove keys from container. This data is used to @@ -283,8 +306,8 @@ public void testEmptyMissingContainerDownNode() throws Exception { reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); } - // Check existing container state in UNHEALTHY_CONTAINER table - // will be updated as EMPTY_MISSING + // Since we no longer add EMPTY_MISSING containers to the table, we should + // have zero EMPTY_MISSING containers in the DB but their information will be logged. LambdaTestUtils.await(25000, 1000, () -> { List allEmptyMissingContainers = reconContainerManager.getContainerSchemaManager() @@ -292,7 +315,16 @@ public void testEmptyMissingContainerDownNode() throws Exception { ContainerSchemaDefinition.UnHealthyContainerStates. EMPTY_MISSING, 0, 1000); - return (allEmptyMissingContainers.size() == 1); + + Map> + unhealthyContainerStateStatsMap = reconScm.getContainerHealthTask() + .getUnhealthyContainerStateStatsMap(); + + // Return true if the size of the fetched containers is 0 and the log shows 1 for EMPTY_MISSING state + return allEmptyMissingContainers.size() == 0 && + unhealthyContainerStateStatsMap.get( + ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING) + .getOrDefault(CONTAINER_COUNT, 0L) == 1; }); // Now restart the cluster and verify the container is no longer missing. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java new file mode 100644 index 00000000000..6c40e69432f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java @@ -0,0 +1,541 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.repair.om; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMStorage; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * FSORepairTool test cases. 
+ */ +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class TestFSORepairTool { + public static final Logger LOG = LoggerFactory.getLogger(TestFSORepairTool.class); + private static final ByteArrayOutputStream OUT = new ByteArrayOutputStream(); + private static final ByteArrayOutputStream ERR = new ByteArrayOutputStream(); + private static final PrintStream OLD_OUT = System.out; + private static final PrintStream OLD_ERR = System.err; + private static final String DEFAULT_ENCODING = UTF_8.name(); + private static MiniOzoneCluster cluster; + private static FileSystem fs; + private static OzoneClient client; + private static CommandLine cmd; + private static String dbPath; + private static FSORepairTool.Report vol1Report; + private static FSORepairTool.Report vol2Report; + private static FSORepairTool.Report fullReport; + private static FSORepairTool.Report emptyReport; + + @BeforeAll + public static void setup() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); + cluster.waitForClusterToBeReady(); + + // Init ofs. + final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); + + cmd = new OzoneRepair().getCmd(); + dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); + + // Build multiple connected and disconnected trees + FSORepairTool.Report report1 = buildConnectedTree("vol1", "bucket1", 10); + FSORepairTool.Report report2 = buildDisconnectedTree("vol2", "bucket1", 10); + FSORepairTool.Report report3 = buildConnectedTree("vol2", "bucket2", 10); + FSORepairTool.Report report4 = buildEmptyTree(); + + vol1Report = new FSORepairTool.Report(report1); + vol2Report = new FSORepairTool.Report(report2, report3); + fullReport = new FSORepairTool.Report(report1, report2, report3, report4); + emptyReport = new FSORepairTool.Report(report4); + + client = OzoneClientFactory.getRpcClient(conf); + ObjectStore store = client.getObjectStore(); + + // Create legacy and OBS buckets. + store.getVolume("vol1").createBucket("obs-bucket", + BucketArgs.newBuilder().setBucketLayout(BucketLayout.OBJECT_STORE) + .build()); + store.getVolume("vol1").createBucket("legacy-bucket", + BucketArgs.newBuilder().setBucketLayout(BucketLayout.LEGACY) + .build()); + + // Put a key in the legacy and OBS buckets. 
+ OzoneOutputStream obsStream = store.getVolume("vol1") + .getBucket("obs-bucket") + .createKey("prefix/test-key", 3); + obsStream.write(new byte[]{1, 1, 1}); + obsStream.close(); + + OzoneOutputStream legacyStream = store.getVolume("vol1") + .getBucket("legacy-bucket") + .createKey("prefix/test-key", 3); + legacyStream.write(new byte[]{1, 1, 1}); + legacyStream.close(); + + // Stop the OM before running the tool + cluster.getOzoneManager().stop(); + } + + @BeforeEach + public void init() throws Exception { + System.setOut(new PrintStream(OUT, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(ERR, false, DEFAULT_ENCODING)); + } + + @AfterEach + public void clean() throws Exception { + // reset stream after each unit test + OUT.reset(); + ERR.reset(); + + // restore system streams + System.setOut(OLD_OUT); + System.setErr(OLD_ERR); + } + + @AfterAll + public static void reset() throws IOException { + if (cluster != null) { + cluster.shutdown(); + } + if (client != null) { + client.close(); + } + IOUtils.closeQuietly(fs); + } + + /** + * Test to check a connected tree with one bucket. + * The output remains the same in debug and repair mode as the tree is connected. + * @throws Exception + */ + @Order(1) + @Test + public void testConnectedTreeOneBucket() throws Exception { + String expectedOutput = serializeReport(vol1Report); + + // Test the connected tree in debug mode. + String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol1", "-b", "bucket1"}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + String reportOutput = extractRelevantSection(cliOutput); + Assertions.assertEquals(expectedOutput, reportOutput); + + OUT.reset(); + ERR.reset(); + + // Running again in repair mode should give same results since the tree is connected. + String[] args1 = new String[] {"om", "fso-tree", "--db", dbPath, "--repair", "-v", "/vol1", "-b", "bucket1"}; + int exitCode1 = cmd.execute(args1); + assertEquals(0, exitCode1); + + String cliOutput1 = OUT.toString(DEFAULT_ENCODING); + String reportOutput1 = extractRelevantSection(cliOutput1); + Assertions.assertEquals(expectedOutput, reportOutput1); + } + + /** + * Test to verify the file size of the tree. + * @throws Exception + */ + @Order(2) + @Test + public void testReportedDataSize() throws Exception { + String expectedOutput = serializeReport(vol2Report); + + String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol2"}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + String reportOutput = extractRelevantSection(cliOutput); + + Assertions.assertEquals(expectedOutput, reportOutput); + } + + /** + * Test to verify how the tool processes the volume and bucket filters. + * - Volume filter only. + * - Both volume and bucket filters. + * - Non-existent bucket. + * - Non-existent volume. + * - Using a bucket filter without specifying a volume. 
+ */ + @Order(3) + @Test + public void testVolumeAndBucketFilter() throws Exception { + // When volume filter is passed + String[] args1 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1"}; + int exitCode1 = cmd.execute(args1); + assertEquals(0, exitCode1); + + String cliOutput1 = OUT.toString(DEFAULT_ENCODING); + String reportOutput1 = extractRelevantSection(cliOutput1); + String expectedOutput1 = serializeReport(vol1Report); + Assertions.assertEquals(expectedOutput1, reportOutput1); + + OUT.reset(); + ERR.reset(); + + // When both volume and bucket filters are passed + String[] args2 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1", + "--bucket", "bucket1"}; + int exitCode2 = cmd.execute(args2); + assertEquals(0, exitCode2); + + String cliOutput2 = OUT.toString(DEFAULT_ENCODING); + String reportOutput2 = extractRelevantSection(cliOutput2); + String expectedOutput2 = serializeReport(vol1Report); + Assertions.assertEquals(expectedOutput2, reportOutput2); + + OUT.reset(); + ERR.reset(); + + // When a non-existent bucket filter is passed + String[] args3 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1", + "--bucket", "bucket3"}; + int exitCode3 = cmd.execute(args3); + assertEquals(0, exitCode3); + String cliOutput3 = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput3.contains("Bucket 'bucket3' does not exist in volume '/vol1'.")); + + OUT.reset(); + ERR.reset(); + + // When a non-existent volume filter is passed + String[] args4 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol5"}; + int exitCode4 = cmd.execute(args4); + assertEquals(0, exitCode4); + String cliOutput4 = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput4.contains("Volume '/vol5' does not exist.")); + + OUT.reset(); + ERR.reset(); + + // When bucket filter is passed without the volume filter. + String[] args5 = new String[]{"om", "fso-tree", "--db", dbPath, "--bucket", "bucket1"}; + int exitCode5 = cmd.execute(args5); + assertEquals(0, exitCode5); + String cliOutput5 = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput5.contains("--bucket flag cannot be used without specifying --volume.")); + } + + /** + * Test to verify that non-fso buckets, such as legacy and obs, are skipped during the process. + * @throws Exception + */ + @Order(4) + @Test + public void testNonFSOBucketsSkipped() throws Exception { + String[] args = new String[] {"om", "fso-tree", "--db", dbPath}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput.contains("Skipping non-FSO bucket /vol1/obs-bucket")); + Assertions.assertTrue(cliOutput.contains("Skipping non-FSO bucket /vol1/legacy-bucket")); + } + + /** + * If no file is present inside a vol/bucket, the report statistics should be zero. + * @throws Exception + */ + @Order(5) + @Test + public void testEmptyFileTrees() throws Exception { + String expectedOutput = serializeReport(emptyReport); + + // Run on an empty volume and bucket + String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol-empty", "-b", "bucket-empty"}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + String reportOutput = extractRelevantSection(cliOutput); + Assertions.assertEquals(expectedOutput, reportOutput); + } + + /** + * Test in repair mode. 
This test ensures that: + * - The initial repair correctly resolves unreferenced objects. + * - Subsequent repair runs do not find any unreferenced objects to process. + * @throws Exception + */ + @Order(6) + @Test + public void testMultipleBucketsAndVolumes() throws Exception { + String expectedOutput = serializeReport(fullReport); + + String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "--repair"}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + String reportOutput = extractRelevantSection(cliOutput); + Assertions.assertEquals(expectedOutput, reportOutput); + Assertions.assertTrue(cliOutput.contains("Unreferenced:\n\tDirectories: 1\n\tFiles: 3\n\tBytes: 30")); + + String[] args1 = new String[] {"om", "fso-tree", "--db", dbPath, "--repair"}; + int exitCode1 = cmd.execute(args1); + assertEquals(0, exitCode1); + String cliOutput1 = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput1.contains("Unreferenced:\n\tDirectories: 0\n\tFiles: 0\n\tBytes: 0")); + } + + /** + * Validate cluster state after OM restart by checking the tables. + * @throws Exception + */ + @Order(7) + @Test + public void validateClusterAfterRestart() throws Exception { + cluster.getOzoneManager().restart(); + + // 4 volumes (/s3v, /vol1, /vol2, /vol-empty) + assertEquals(4, countTableEntries(cluster.getOzoneManager().getMetadataManager().getVolumeTable())); + // 6 buckets (vol1/bucket1, vol2/bucket1, vol2/bucket2, vol-empty/bucket-empty, vol/legacy-bucket, vol1/obs-bucket) + assertEquals(6, countTableEntries(cluster.getOzoneManager().getMetadataManager().getBucketTable())); + // 1 directory is unreferenced and moved to the deletedDirTable during repair mode. + assertEquals(1, countTableEntries(cluster.getOzoneManager().getMetadataManager().getDeletedDirTable())); + // 3 files are unreferenced and moved to the deletedTable during repair mode. + assertEquals(3, countTableEntries(cluster.getOzoneManager().getMetadataManager().getDeletedTable())); + } + + private int countTableEntries(Table table) throws Exception { + int count = 0; + try (TableIterator> iterator = table.iterator()) { + while (iterator.hasNext()) { + iterator.next(); + count++; + } + } + return count; + } + + private String extractRelevantSection(String cliOutput) { + int startIndex = cliOutput.indexOf("Reachable:"); + if (startIndex == -1) { + throw new AssertionError("Output does not contain 'Reachable' section."); + } + return cliOutput.substring(startIndex).trim(); + } + + private String serializeReport(FSORepairTool.Report report) { + return String.format( + "Reachable:%n\tDirectories: %d%n\tFiles: %d%n\tBytes: %d%n" + + "Unreachable:%n\tDirectories: %d%n\tFiles: %d%n\tBytes: %d%n" + + "Unreferenced:%n\tDirectories: %d%n\tFiles: %d%n\tBytes: %d", + report.getReachable().getDirs(), + report.getReachable().getFiles(), + report.getReachable().getBytes(), + report.getUnreachable().getDirs(), + report.getUnreachable().getFiles(), + report.getUnreachable().getBytes(), + report.getUnreferenced().getDirs(), + report.getUnreferenced().getFiles(), + report.getUnreferenced().getBytes() + ); + } + + /** + * Creates a tree with 3 reachable directories and 4 reachable files. 
+ */ + private static FSORepairTool.Report buildConnectedTree(String volume, String bucket, int fileSize) throws Exception { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + fs.mkdirs(dir1); + fs.mkdirs(dir2); + fs.mkdirs(dir3); + + // Content to put in every file. + String data = new String(new char[fileSize]); + + FSDataOutputStream stream = fs.create(file1); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file2); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file3); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file4); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + + assertConnectedTreeReadable(volume, bucket); + + FSORepairTool.ReportStatistics reachableCount = + new FSORepairTool.ReportStatistics(3, 4, fileSize * 4L); + return new FSORepairTool.Report.Builder() + .setReachable(reachableCount) + .build(); + } + + private static FSORepairTool.Report buildEmptyTree() throws IOException { + fs.mkdirs(new Path("/vol-empty/bucket-empty")); + FSORepairTool.ReportStatistics reachableCount = + new FSORepairTool.ReportStatistics(0, 0, 0); + FSORepairTool.ReportStatistics unreachableCount = + new FSORepairTool.ReportStatistics(0, 0, 0); + FSORepairTool.ReportStatistics unreferencedCount = + new FSORepairTool.ReportStatistics(0, 0, 0); + return new FSORepairTool.Report.Builder() + .setReachable(reachableCount) + .setUnreachable(unreachableCount) + .setUnreferenced(unreferencedCount) + .build(); + } + + private static void assertConnectedTreeReadable(String volume, String bucket) throws IOException { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + Assertions.assertTrue(fs.exists(dir1)); + Assertions.assertTrue(fs.exists(dir2)); + Assertions.assertTrue(fs.exists(dir3)); + Assertions.assertTrue(fs.exists(file1)); + Assertions.assertTrue(fs.exists(file2)); + Assertions.assertTrue(fs.exists(file3)); + Assertions.assertTrue(fs.exists(file4)); + } + + /** + * Creates a tree with 1 reachable directory, 1 reachable file, 1 + * unreachable directory, and 3 unreachable files. + */ + private static FSORepairTool.Report buildDisconnectedTree(String volume, String bucket, int fileSize) + throws Exception { + buildConnectedTree(volume, bucket, fileSize); + + // Manually remove dir1. This should disconnect 3 of the files and 1 of + // the directories. + disconnectDirectory("dir1"); + + assertDisconnectedTreePartiallyReadable(volume, bucket); + + // dir1 does not count towards the unreachable directories the tool + // will see. It was deleted completely so the tool will never see it. 
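+ // With dir1's entry gone from the directory table, only dir3 and file4 stay reachable (1 directory, 1 file),
+ // while dir2, file1, file2 and file3 are no longer reachable from the bucket and are counted as
+ // unreferenced (1 directory, 3 files).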
+ FSORepairTool.ReportStatistics reachableCount = + new FSORepairTool.ReportStatistics(1, 1, fileSize); + FSORepairTool.ReportStatistics unreferencedCount = + new FSORepairTool.ReportStatistics(1, 3, fileSize * 3L); + return new FSORepairTool.Report.Builder() + .setReachable(reachableCount) + .setUnreferenced(unreferencedCount) + .build(); + } + + private static void disconnectDirectory(String dirName) throws Exception { + Table dirTable = cluster.getOzoneManager().getMetadataManager().getDirectoryTable(); + try (TableIterator> iterator = dirTable.iterator()) { + while (iterator.hasNext()) { + Table.KeyValue entry = iterator.next(); + String key = entry.getKey(); + if (key.contains(dirName)) { + dirTable.delete(key); + break; + } + } + } + } + + private static void assertDisconnectedTreePartiallyReadable(String volume, String bucket) throws Exception { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + Assertions.assertFalse(fs.exists(dir1)); + Assertions.assertFalse(fs.exists(dir2)); + Assertions.assertTrue(fs.exists(dir3)); + Assertions.assertFalse(fs.exists(file1)); + Assertions.assertFalse(fs.exists(file2)); + Assertions.assertFalse(fs.exists(file3)); + Assertions.assertTrue(fs.exists(file4)); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java new file mode 100644 index 00000000000..ab56af670b3 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java @@ -0,0 +1,902 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.AmazonServiceException.ErrorType; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.AccessControlList; +import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.CanonicalGrantee; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.CreateBucketRequest; +import com.amazonaws.services.s3.model.Grantee; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsV2Request; +import com.amazonaws.services.s3.model.ListObjectsV2Result; +import com.amazonaws.services.s3.model.ListPartsRequest; +import com.amazonaws.services.s3.model.MultipartUpload; +import com.amazonaws.services.s3.model.MultipartUploadListing; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.ObjectTagging; +import com.amazonaws.services.s3.model.Owner; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.PartListing; +import com.amazonaws.services.s3.model.PartSummary; +import com.amazonaws.services.s3.model.Permission; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.PutObjectResult; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.s3.model.S3ObjectInputStream; +import com.amazonaws.services.s3.model.S3ObjectSummary; +import com.amazonaws.services.s3.model.SetObjectAclRequest; +import com.amazonaws.services.s3.model.Tag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.amazonaws.services.s3.transfer.TransferManager; +import com.amazonaws.services.s3.transfer.TransferManagerBuilder; +import com.amazonaws.services.s3.transfer.Upload; +import com.amazonaws.services.s3.transfer.model.UploadResult; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.utils.InputSubstream; +import org.apache.ozone.test.OzoneTestBase; +import org.junit.jupiter.api.MethodOrderer; 
+import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.io.TempDir; + +import javax.xml.bind.DatatypeConverter; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.MessageDigest; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Random; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.OzoneConsts.MB; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * This is an abstract class to test the AWS Java S3 SDK operations. + * This class should be extended for OM standalone and OM HA (Ratis) cluster setup. + * + * The test scenarios are adapted from + * - https://github.com/awsdocs/aws-doc-sdk-examples/tree/main/java/example_code/s3/ + * - https://github.com/ceph/s3-tests + * + * TODO: Currently we are using AWS SDK V1, need to also add tests for AWS SDK V2. + */ +@TestMethodOrder(MethodOrderer.MethodName.class) +public abstract class AbstractS3SDKV1Tests extends OzoneTestBase { + + /** + * There are still some unsupported S3 operations. 
+ * Current unsupported S3 operations (non-exhaustive): + * - Cross Region Replication (CrossRegionReplication.java) + * - Versioning-enabled buckets + * - DeleteObjectVersionEnabledBucket.java + * - DeleteMultipleObjectsVersionEnabledBucket.java + * - ListKeysVersioningEnabledBucket.java + * - Website configurations + * - WebsiteConfiguration.java + * - SetWebsiteConfiguration.java + * - GetWebsiteConfiguration.java + * - DeleteWebsiteConfiguration.java + * - S3 Event Notifications + * - EnableNotificationOnABucket.java + * - Object tags + * - GetObjectTags.java + * - GetObjectTags2.java + * - Bucket policy + * - SetBucketPolicy.java + * - GetBucketPolicy.java + * - DeleteBucketPolicy.java + * - Bucket lifecycle configuration + * - LifecycleConfiguration.java + * - Canned Bucket ACL + * - CreateBucketWithACL.java + * - Object ACL + * - SetAcl.java + * - ModifyACLExistingObject.java + * - GetAcl.java + * - S3 Encryption + * - S3Encrypt.java + * - S3EncryptV2.java + * - Client-side encryption + * - S3ClientSideEncryptionAsymmetricMasterKey.java + * - S3ClientSideEncryptionSymMasterKey.java + * - Server-side encryption + * - SpecifyServerSideEncryption.java + * - ServerSideEncryptionCopyObjectUsingHLWithSSEC.java + * - ServerSideEncryptionUsingClientSideEncryptionKey.java + * - Dual stack endpoints + * - DualStackEndpoints.java + * - Transfer acceleration + * - TransferAcceleration.java + * - Temp credentials + * - MakingRequestsWithFederatedTempCredentials.java + * - MakingRequestsWithIAMTempCredentials.java + * - Object archival + * - RestoreArchivedObject + * - KMS key + * - UploadObjectKMSKey.java + */ + + private static MiniOzoneCluster cluster = null; + private static AmazonS3 s3Client = null; + + /** + * Create a MiniOzoneCluster with S3G enabled for testing. + * @param conf Configurations to start the cluster + * @throws Exception exception thrown when waiting for the cluster to be ready. + */ + static void startCluster(OzoneConfiguration conf) throws Exception { + cluster = MiniOzoneCluster.newBuilder(conf) + .includeS3G(true) + .setNumDatanodes(5) + .build(); + cluster.waitForClusterToBeReady(); + s3Client = cluster.newS3Client(); + } + + /** + * Shut down the MiniOzoneCluster.
+ */ + static void shutdownCluster() throws IOException { + if (cluster != null) { + cluster.shutdown(); + } + } + + public static void setCluster(MiniOzoneCluster cluster) { + AbstractS3SDKV1Tests.cluster = cluster; + } + + public static MiniOzoneCluster getCluster() { + return AbstractS3SDKV1Tests.cluster; + } + + @Test + public void testCreateBucket() { + final String bucketName = getBucketName(); + + Bucket b = s3Client.createBucket(bucketName); + + assertEquals(bucketName, b.getName()); + assertTrue(s3Client.doesBucketExist(bucketName)); + assertTrue(s3Client.doesBucketExistV2(bucketName)); + assertTrue(isBucketEmpty(b)); + } + + @Test + public void testBucketACLOperations() { + // TODO HDDS-11738: Uncomment assertions when bucket S3 ACL logic has been fixed + final String bucketName = getBucketName(); + + AccessControlList aclList = new AccessControlList(); + Owner owner = new Owner("owner", "owner"); + aclList.withOwner(owner); + Grantee grantee = new CanonicalGrantee("testGrantee"); + aclList.grantPermission(grantee, Permission.Read); + + + CreateBucketRequest createBucketRequest = new CreateBucketRequest(bucketName) + .withAccessControlList(aclList); + + s3Client.createBucket(createBucketRequest); + + //assertEquals(aclList, s3Client.getBucketAcl(bucketName)); + + aclList.grantPermission(grantee, Permission.Write); + s3Client.setBucketAcl(bucketName, aclList); + + //assertEquals(aclList, s3Client.getBucketAcl(bucketName)); + } + + @Test + public void testListBuckets() { + List bucketNames = new ArrayList<>(); + for (int i = 0; i <= 5; i++) { + String bucketName = getBucketName(String.valueOf(i)); + s3Client.createBucket(bucketName); + bucketNames.add(bucketName); + } + + List bucketList = s3Client.listBuckets(); + List listBucketNames = bucketList.stream() + .map(Bucket::getName).collect(Collectors.toList()); + + assertThat(listBucketNames).containsAll(bucketNames); + } + + @Test + public void testDeleteBucket() { + final String bucketName = getBucketName(); + + s3Client.createBucket(bucketName); + + s3Client.deleteBucket(bucketName); + + assertFalse(s3Client.doesBucketExist(bucketName)); + assertFalse(s3Client.doesBucketExistV2(bucketName)); + } + + @Test + public void testDeleteBucketNotExist() { + final String bucketName = getBucketName(); + + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.deleteBucket(bucketName)); + + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testDeleteBucketNonEmptyWithKeys() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + + // Upload some objects to the bucket + for (int i = 1; i <= 10; i++) { + s3Client.putObject(bucketName, "key-" + i, RandomStringUtils.randomAlphanumeric(1024)); + } + + // Bucket deletion should fail if there are still keys in the bucket + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.deleteBucket(bucketName) + ); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(409, ase.getStatusCode()); + assertEquals("BucketNotEmpty", ase.getErrorCode()); + + // Delete all the keys + ObjectListing objectListing = s3Client.listObjects(bucketName); + while (true) { + for (S3ObjectSummary summary : objectListing.getObjectSummaries()) { + s3Client.deleteObject(bucketName, summary.getKey()); + } + + // more object_listing to retrieve? 
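+ // listObjects() returns one page of results at a time; keep fetching with listNextBatchOfObjects()
+ // until the listing is no longer truncated.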
+ if (objectListing.isTruncated()) { + objectListing = s3Client.listNextBatchOfObjects(objectListing); + } else { + break; + } + } + } + + @Test + public void testDeleteBucketNonEmptyWithIncompleteMultipartUpload(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (5 * MB)); + + // Create an incomplete multipart upload by initiating multipart upload, + // uploading some parts, but not actually completing it. + String uploadId = initiateMultipartUpload(bucketName, keyName, null, null, null); + + uploadParts(bucketName, keyName, uploadId, multipartUploadFile, 1 * MB); + + // Bucket deletion should fail if there are still keys in the bucket + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.deleteBucket(bucketName) + ); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(409, ase.getStatusCode()); + assertEquals("BucketNotEmpty", ase.getErrorCode()); + + // After the multipart upload is aborted, the bucket deletion should succeed + abortMultipartUpload(bucketName, keyName, uploadId); + + s3Client.deleteBucket(bucketName); + + assertFalse(s3Client.doesBucketExistV2(bucketName)); + } + + @Test + public void testPutObject() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = "bar"; + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)); + + PutObjectResult putObjectResult = s3Client.putObject(bucketName, keyName, is, new ObjectMetadata()); + assertEquals("37b51d194a7513e45b56f6524f2d51f2", putObjectResult.getETag()); + } + + @Test + public void testPutObjectEmpty() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = ""; + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)); + + PutObjectResult putObjectResult = s3Client.putObject(bucketName, keyName, is, new ObjectMetadata()); + assertEquals("d41d8cd98f00b204e9800998ecf8427e", putObjectResult.getETag()); + } + + @Test + public void testPutObjectACL() throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = "bar"; + final byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)); + + PutObjectResult putObjectResult = s3Client.putObject(bucketName, keyName, is, new ObjectMetadata()); + String originalObjectETag = putObjectResult.getETag(); + assertTrue(s3Client.doesObjectExist(bucketName, keyName)); + + AccessControlList aclList = new AccessControlList(); + Owner owner = new Owner("owner", "owner"); + aclList.withOwner(owner); + Grantee grantee = new CanonicalGrantee("testGrantee"); + aclList.grantPermission(grantee, Permission.Read); + + SetObjectAclRequest setObjectAclRequest = new SetObjectAclRequest(bucketName, keyName, aclList); + + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.setObjectAcl(setObjectAclRequest)); + assertEquals("NotImplemented", ase.getErrorCode()); + assertEquals(501, ase.getStatusCode()); + assertEquals(ErrorType.Service, 
ase.getErrorType()); + + // Ensure that the object content remains unchanged + ObjectMetadata updatedObjectMetadata = s3Client.getObjectMetadata(bucketName, keyName); + assertEquals(originalObjectETag, updatedObjectMetadata.getETag()); + S3Object updatedObject = s3Client.getObject(bucketName, keyName); + + try (S3ObjectInputStream s3is = updatedObject.getObjectContent(); + ByteArrayOutputStream bos = new ByteArrayOutputStream(contentBytes.length)) { + byte[] readBuf = new byte[1024]; + int readLen = 0; + while ((readLen = s3is.read(readBuf)) > 0) { + bos.write(readBuf, 0, readLen); + } + assertEquals(content, bos.toString("UTF-8")); + } + } + + @Test + public void testGetObject() throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = "bar"; + final byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(contentBytes); + ObjectMetadata objectMetadata = new ObjectMetadata(); + Map userMetadata = new HashMap<>(); + userMetadata.put("key1", "value1"); + userMetadata.put("key2", "value2"); + objectMetadata.setUserMetadata(userMetadata); + + List tags = Arrays.asList(new Tag("tag1", "value1"), new Tag("tag2", "value2")); + ObjectTagging objectTagging = new ObjectTagging(tags); + + + PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, keyName, is, objectMetadata) + .withTagging(objectTagging); + + s3Client.putObject(putObjectRequest); + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertEquals(tags.size(), s3Object.getTaggingCount()); + + try (S3ObjectInputStream s3is = s3Object.getObjectContent(); + ByteArrayOutputStream bos = new ByteArrayOutputStream(contentBytes.length)) { + byte[] readBuf = new byte[1024]; + int readLen = 0; + while ((readLen = s3is.read(readBuf)) > 0) { + bos.write(readBuf, 0, readLen); + } + assertEquals(content, bos.toString("UTF-8")); + } + } + + @Test + public void testGetObjectWithoutETag() throws Exception { + // Objects uploaded using other protocols (e.g. ofs / ozone cli) will not + // have an ETag. Ensure that GetObject does not perform ETag validation if there + // is no ETag present.
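+ // The key below is therefore written through the native Ozone RPC client instead of S3, so no ETag
+ // metadata is stored; GetObject should still stream the full content and report a null ETag.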
+ final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + String value = "sample value"; + byte[] valueBytes = value.getBytes(StandardCharsets.UTF_8); + + OzoneConfiguration conf = cluster.getConf(); + try (OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(conf)) { + ObjectStore store = ozoneClient.getObjectStore(); + + OzoneVolume volume = store.getS3Volume(); + OzoneBucket bucket = volume.getBucket(bucketName); + + try (OzoneOutputStream out = bucket.createKey(keyName, + valueBytes.length, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE), + Collections.emptyMap())) { + out.write(valueBytes); + } + } + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertNull(s3Object.getObjectMetadata().getETag()); + + try (S3ObjectInputStream s3is = s3Object.getObjectContent(); + ByteArrayOutputStream bos = new ByteArrayOutputStream(valueBytes.length)) { + byte[] readBuf = new byte[1024]; + int readLen = 0; + while ((readLen = s3is.read(readBuf)) > 0) { + bos.write(readBuf, 0, readLen); + } + assertEquals(value, bos.toString("UTF-8")); + } + } + + @Test + public void testListObjectsMany() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + final List keyNames = Arrays.asList( + getKeyName("1"), + getKeyName("2"), + getKeyName("3") + ); + + for (String keyName: keyNames) { + s3Client.putObject(bucketName, keyName, RandomStringUtils.randomAlphanumeric(5)); + } + + ListObjectsRequest listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName) + .withMaxKeys(2); + ObjectListing listObjectsResponse = s3Client.listObjects(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(2); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(0, 2)); + assertTrue(listObjectsResponse.isTruncated()); + + + listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName) + .withMaxKeys(2) + .withMarker(listObjectsResponse.getNextMarker()); + listObjectsResponse = s3Client.listObjects(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(1); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(2, keyNames.size())); + assertFalse(listObjectsResponse.isTruncated()); + } + + @Test + public void testListObjectsManyV2() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + final List keyNames = Arrays.asList( + getKeyName("1"), + getKeyName("2"), + getKeyName("3") + ); + + for (String keyName: keyNames) { + s3Client.putObject(bucketName, keyName, RandomStringUtils.randomAlphanumeric(5)); + } + + ListObjectsV2Request listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName) + .withMaxKeys(2); + ListObjectsV2Result listObjectsResponse = s3Client.listObjectsV2(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(2); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(0, 2)); + assertTrue(listObjectsResponse.isTruncated()); + + + 
listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName) + .withMaxKeys(2) + .withContinuationToken(listObjectsResponse.getNextContinuationToken()); + listObjectsResponse = s3Client.listObjectsV2(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(1); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(2, keyNames.size())); + assertFalse(listObjectsResponse.isTruncated()); + } + + @Test + public void testListObjectsBucketNotExist() { + final String bucketName = getBucketName(); + ListObjectsRequest listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName); + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listObjects(listObjectsRequest)); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testListObjectsV2BucketNotExist() { + final String bucketName = getBucketName(); + ListObjectsV2Request listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName); + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listObjectsV2(listObjectsRequest)); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testHighLevelMultipartUpload(@TempDir Path tempDir) throws Exception { + TransferManager tm = TransferManagerBuilder.standard() + .withS3Client(s3Client) + .build(); + + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + // The minimum file size for TransferManager to initiate a multipart upload is 16MB, so create a file + // larger than the threshold. + // See TransferManagerConfiguration#getMultipartUploadThreshold + int fileSize = (int) (20 * MB); + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, fileSize); + + // TransferManager processes all transfers asynchronously, + // so this call returns immediately.
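+ // The upload is then awaited with waitForCompletion()/waitForUploadResult(), and the result is checked
+ // against the expected bucket and key.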
+ Upload upload = tm.upload(bucketName, keyName, multipartUploadFile); + + upload.waitForCompletion(); + UploadResult uploadResult = upload.waitForUploadResult(); + assertEquals(bucketName, uploadResult.getBucketName()); + assertEquals(keyName, uploadResult.getKey()); + } + + @Test + public void testLowLevelMultipartUpload(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final Map userMetadata = new HashMap<>(); + userMetadata.put("key1", "value1"); + userMetadata.put("key2", "value2"); + + List tags = Arrays.asList(new Tag("tag1", "value1"), new Tag("tag2", "value2")); + + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (25 * MB)); + + multipartUpload(bucketName, keyName, multipartUploadFile, 5 * MB, null, userMetadata, tags); + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertEquals(keyName, s3Object.getKey()); + assertEquals(bucketName, s3Object.getBucketName()); + assertEquals(tags.size(), s3Object.getTaggingCount()); + + ObjectMetadata objectMetadata = s3Client.getObjectMetadata(bucketName, keyName); + assertEquals(userMetadata, objectMetadata.getUserMetadata()); + } + + @Test + public void testListMultipartUploads() { + final String bucketName = getBucketName(); + final String multipartKey1 = getKeyName("multipart1"); + final String multipartKey2 = getKeyName("multipart2"); + + s3Client.createBucket(bucketName); + + List uploadIds = new ArrayList<>(); + + String uploadId1 = initiateMultipartUpload(bucketName, multipartKey1, null, null, null); + uploadIds.add(uploadId1); + String uploadId2 = initiateMultipartUpload(bucketName, multipartKey1, null, null, null); + uploadIds.add(uploadId2); + // TODO: Currently, Ozone sorts based on uploadId instead of MPU init time within the same key. 
+ // Remove this sorting step once HDDS-11532 has been implemented + Collections.sort(uploadIds); + String uploadId3 = initiateMultipartUpload(bucketName, multipartKey2, null, null, null); + uploadIds.add(uploadId3); + + // TODO: Add test for max uploads threshold and marker once HDDS-11530 has been implemented + ListMultipartUploadsRequest listMultipartUploadsRequest = new ListMultipartUploadsRequest(bucketName); + + MultipartUploadListing result = s3Client.listMultipartUploads(listMultipartUploadsRequest); + + List listUploadIds = result.getMultipartUploads().stream() + .map(MultipartUpload::getUploadId) + .collect(Collectors.toList()); + + assertEquals(uploadIds, listUploadIds); + } + + @Test + public void testListParts(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final long fileSize = 5 * MB; + final long partSize = 1 * MB; + final int maxParts = 2; + + s3Client.createBucket(bucketName); + + String uploadId = initiateMultipartUpload(bucketName, keyName, null, null, null); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) fileSize); + + List partETags = uploadParts(bucketName, keyName, uploadId, multipartUploadFile, partSize); + + List listPartETags = new ArrayList<>(); + int partNumberMarker = 0; + int expectedNumOfParts = 5; + PartListing listPartsResult; + do { + ListPartsRequest listPartsRequest = new ListPartsRequest(bucketName, keyName, uploadId) + .withMaxParts(maxParts) + .withPartNumberMarker(partNumberMarker); + listPartsResult = s3Client.listParts(listPartsRequest); + if (expectedNumOfParts > maxParts) { + assertTrue(listPartsResult.isTruncated()); + partNumberMarker = listPartsResult.getNextPartNumberMarker(); + expectedNumOfParts -= maxParts; + } else { + assertFalse(listPartsResult.isTruncated()); + } + for (PartSummary partSummary : listPartsResult.getParts()) { + listPartETags.add(new PartETag(partSummary.getPartNumber(), partSummary.getETag())); + } + } while (listPartsResult.isTruncated()); + + assertEquals(partETags.size(), listPartETags.size()); + for (int i = 0; i < partETags.size(); i++) { + assertEquals(partETags.get(i).getPartNumber(), listPartETags.get(i).getPartNumber()); + assertEquals(partETags.get(i).getETag(), listPartETags.get(i).getETag()); + } + } + + @Test + public void testListPartsNotFound() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + ListPartsRequest listPartsRequest = + new ListPartsRequest(bucketName, keyName, "nonexist"); + + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listParts(listPartsRequest)); + + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchUpload", ase.getErrorCode()); + } + + private boolean isBucketEmpty(Bucket bucket) { + ObjectListing objectListing = s3Client.listObjects(bucket.getName()); + return objectListing.getObjectSummaries().isEmpty(); + } + + private String getBucketName() { + return getBucketName(null); + } + + private String getBucketName(String suffix) { + return (getTestName() + "bucket" + suffix).toLowerCase(Locale.ROOT); + } + + private String getKeyName() { + return getKeyName(null); + } + + private String getKeyName(String suffix) { + return (getTestName() + "key" + suffix).toLowerCase(Locale.ROOT); + } + + private String multipartUpload(String 
bucketName, String key, File file, long partSize, String contentType, + Map userMetadata, List tags) throws Exception { + String uploadId = initiateMultipartUpload(bucketName, key, contentType, userMetadata, tags); + + List partETags = uploadParts(bucketName, key, uploadId, file, partSize); + + completeMultipartUpload(bucketName, key, uploadId, partETags); + + return uploadId; + } + + private String initiateMultipartUpload(String bucketName, String key, String contentType, + Map metadata, List tags) { + InitiateMultipartUploadRequest initRequest; + if (metadata == null || metadata.isEmpty()) { + initRequest = new InitiateMultipartUploadRequest(bucketName, key); + } else { + ObjectMetadata objectMetadata = new ObjectMetadata(); + objectMetadata.setUserMetadata(metadata); + if (contentType != null) { + objectMetadata.setContentType(contentType); + } + + initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata) + .withTagging(new ObjectTagging(tags)); + } + + InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); + assertEquals(bucketName, initResponse.getBucketName()); + assertEquals(key, initResponse.getKey()); + // TODO: Once bucket lifecycle configuration is supported, should check for "abortDate" and "abortRuleId" + + return initResponse.getUploadId(); + } + + // TODO: Also support async upload parts (similar to v2 asyncClient) + private List uploadParts(String bucketName, String key, String uploadId, File file, long partSize) + throws Exception { + // Create a list of ETag objects. You retrieve ETags for each object part + // uploaded, + // then, after each individual part has been uploaded, pass the list of ETags to + // the request to complete the upload. + List partETags = new ArrayList<>(); + + // Upload the file parts. + long filePosition = 0; + long fileLength = file.length(); + try (FileInputStream fileInputStream = new FileInputStream(file)) { + for (int i = 1; filePosition < fileLength; i++) { + // Because the last part could be less than 5 MB, adjust the part size as + // needed. + partSize = Math.min(partSize, (fileLength - filePosition)); + + // Create the request to upload a part. + UploadPartRequest uploadRequest = new UploadPartRequest() + .withBucketName(bucketName) + .withKey(key) + .withUploadId(uploadId) + .withPartNumber(i) + .withFileOffset(filePosition) + .withFile(file) + .withPartSize(partSize); + + // Upload the part and add the response's ETag to our list. 
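+ // Each returned ETag is also compared against the MD5 digest of the corresponding part of the local file,
+ // recomputed here with calculateDigest().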
+ UploadPartResult uploadResult = s3Client.uploadPart(uploadRequest); + PartETag partETag = uploadResult.getPartETag(); + assertEquals(i, partETag.getPartNumber()); + assertEquals(DatatypeConverter.printHexBinary( + calculateDigest(fileInputStream, 0, (int) partSize)).toLowerCase(), partETag.getETag()); + partETags.add(partETag); + + filePosition += partSize; + } + } + + return partETags; + } + + private void completeMultipartUpload(String bucketName, String key, String uploadId, List partETags) { + CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, key, + uploadId, partETags); + CompleteMultipartUploadResult compResponse = s3Client.completeMultipartUpload(compRequest); + assertEquals(bucketName, compResponse.getBucketName()); + assertEquals(key, compResponse.getKey()); + } + + private void abortMultipartUpload(String bucketName, String key, String uploadId) { + AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, key, uploadId); + s3Client.abortMultipartUpload(abortRequest); + } + + private static byte[] calculateDigest(InputStream inputStream, int skip, int length) throws Exception { + int numRead; + byte[] buffer = new byte[1024]; + + MessageDigest complete = MessageDigest.getInstance("MD5"); + if (skip > -1 && length > -1) { + inputStream = new InputSubstream(inputStream, skip, length); + } + + do { + numRead = inputStream.read(buffer); + if (numRead > 0) { + complete.update(buffer, 0, numRead); + } + } while (numRead != -1); + + return complete.digest(); + } + + private static void createFile(File newFile, int size) throws IOException { + // write random data so that filesystems with compression enabled (e.g. ZFS) + // can't compress the file + Random random = new Random(); + byte[] data = new byte[size]; + random.nextBytes(data); + + RandomAccessFile file = new RandomAccessFile(newFile, "rws"); + + file.write(data); + + file.getFD().sync(); + file.close(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java new file mode 100644 index 00000000000..5e9b3633be0 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Timeout; + +import java.io.IOException; + +/** + * Tests the AWS S3 SDK basic operations with OM Ratis disabled. + */ +@Timeout(300) +public class TestS3SDKV1 extends AbstractS3SDKV1Tests { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false); + conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); + startCluster(conf); + } + + @AfterAll + public static void shutdown() throws IOException { + shutdownCluster(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java new file mode 100644 index 00000000000..cb614453f69 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; + +import java.io.IOException; + +/** + * Tests the AWS S3 SDK basic operations with OM Ratis enabled. + */ +public class TestS3SDKV1WithRatis extends AbstractS3SDKV1Tests { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, + false); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); + conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, + true); + startCluster(conf); + } + + @AfterAll + public static void shutdown() throws IOException { + shutdownCluster(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java new file mode 100644 index 00000000000..571d4c64908 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Timeout; + +import java.io.IOException; + +/** + * Tests the AWS S3 SDK basic operations with OM Ratis enabled and Streaming Write Pipeline. + */ +@Timeout(300) +public class TestS3SDKV1WithRatisStreaming extends AbstractS3SDKV1Tests { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, + false); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); + conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, + true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + conf.setBoolean(OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED, true); + // Ensure that all writes use datastream + conf.set(OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD, "0MB"); + startCluster(conf); + } + + @AfterAll + public static void shutdown() throws IOException { + shutdownCluster(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 6f6c5439d8c..730a2479a51 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -259,5 +259,27 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); assertEquals(30, currentValidTxnNum); + + // Fail first 20 txns be failed + // increment retry count than threshold, count will be set to -1 + for (int i = 0; i < maxRetry + 1; i++) { + deletedBlockLog.incrementCount(txIds); + } + flush(); + + GetFailedDeletedBlocksTxnSubcommand getFailedBlockCommand = + new GetFailedDeletedBlocksTxnSubcommand(); + outContent.reset(); + cmd = new CommandLine(getFailedBlockCommand); + // set start transaction as 15 + cmd.parseArgs("-c", "5", "-s", "15"); + getFailedBlockCommand.execute(scmClient); + matchCount = 0; + p = Pattern.compile("\"txID\" : \\d+", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + while (m.find()) { + matchCount += 1; + } + assertEquals(5, matchCount); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java index fdc3ec00087..65cfb780fbf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java @@ -58,7 +58,8 @@ public class TestNSSummaryAdmin extends StandardOutputTestBase { @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + ozoneAdmin = new OzoneAdmin(); + conf = ozoneAdmin.getOzoneConf(); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); conf.set(OZONE_RECON_ADDRESS_KEY, "localhost:9888"); cluster = MiniOzoneCluster.newBuilder(conf) @@ -67,9 +68,6 @@ public static void init() throws Exception { client = cluster.newClient(); store = client.getObjectStore(); - // Client uses server conf for this test - ozoneAdmin = new OzoneAdmin(conf); - volumeName = UUID.randomUUID().toString(); bucketOBS = UUID.randomUUID().toString(); bucketFSO = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index 9b1747b4c27..d8315cb427d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -36,9 +36,8 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneSnapshot; -import org.apache.hadoop.ozone.debug.DBScanner; import org.apache.hadoop.ozone.debug.OzoneDebug; -import org.apache.hadoop.ozone.debug.RDBParser; +import org.apache.hadoop.ozone.debug.ldb.RDBParser; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -84,6 +83,7 @@ public class TestOzoneDebugShell { private static MiniOzoneCluster cluster = null; private static OzoneClient client; + private static OzoneDebug ozoneDebugShell; private static OzoneConfiguration conf = null; @@ -101,7 +101,8 @@ protected static void startCluster() throws Exception { @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + ozoneDebugShell = new OzoneDebug(); + conf = ozoneDebugShell.getOzoneConf(); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); @@ -149,7 +150,6 @@ public void testLdbCliForOzoneSnapshot() throws Exception { StringWriter stdout = new StringWriter(); PrintWriter pstdout = new PrintWriter(stdout); CommandLine cmd = new CommandLine(new RDBParser()) - .addSubcommand(new DBScanner()) .setOut(pstdout); final String volumeName = UUID.randomUUID().toString(); final String bucketName = UUID.randomUUID().toString(); @@ -208,7 +208,6 @@ private int runChunkInfoCommand(String volumeName, String bucketName, getSetConfStringFromConf(OMConfigKeys.OZONE_OM_ADDRESS_KEY), "chunkinfo", bucketPath + Path.SEPARATOR + keyName }; - OzoneDebug ozoneDebugShell = new OzoneDebug(conf); int exitCode = ozoneDebugShell.execute(args); return exitCode; } @@ -220,7 +219,6 @@ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName, String[] args = new String[] { getSetConfStringFromConf(OMConfigKeys.OZONE_OM_ADDRESS_KEY), "chunkinfo", bucketPath + Path.SEPARATOR + keyName }; - OzoneDebug ozoneDebugShell = new OzoneDebug(conf); int exitCode = 1; try (GenericTestUtils.SystemOutCapturer capture = new GenericTestUtils 
.SystemOutCapturer()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index 328fc1ddd8c..e770a36c737 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -19,11 +19,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.debug.DBScanner; -import org.apache.hadoop.ozone.debug.RDBParser; +import org.apache.hadoop.ozone.debug.ldb.RDBParser; import org.apache.hadoop.ozone.om.OMStorage; -import org.apache.hadoop.ozone.repair.RDBRepair; -import org.apache.hadoop.ozone.repair.TransactionInfoRepair; +import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.apache.hadoop.ozone.repair.ldb.RDBRepair; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -38,6 +38,7 @@ import java.util.regex.Pattern; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static java.nio.charset.StandardCharsets.UTF_8; @@ -83,7 +84,7 @@ public void reset() { @Test public void testUpdateTransactionInfoTable() throws Exception { - CommandLine cmd = new CommandLine(new RDBRepair()).addSubcommand(new TransactionInfoRepair()); + CommandLine cmd = new CommandLine(new RDBRepair()); String dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); cluster.getOzoneManager().stop(); @@ -114,7 +115,7 @@ public void testUpdateTransactionInfoTable() throws Exception { } private String scanTransactionInfoTable(String dbPath) throws Exception { - CommandLine cmdDBScanner = new CommandLine(new RDBParser()).addSubcommand(new DBScanner()); + CommandLine cmdDBScanner = new CommandLine(new RDBParser()); String[] argsDBScanner = new String[] {"--db=" + dbPath, "scan", "--column_family", "transactionInfoTable"}; cmdDBScanner.execute(argsDBScanner); @@ -130,4 +131,27 @@ private String[] parseScanOutput(String output) throws IOException { throw new IllegalStateException("Failed to scan and find raft's highest term and index from TransactionInfo table"); } + @Test + public void testQuotaRepair() throws Exception { + CommandLine cmd = new OzoneRepair().getCmd(); + + String[] args = new String[] {"quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode, err::toString); + args = new String[] {"quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; + exitCode = cmd.execute(args); + assertEquals(0, exitCode); + GenericTestUtils.waitFor(() -> { + out.reset(); + // verify quota trigger is completed having non-zero lastRunFinishedTime + String[] targs = new String[]{"quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; + cmd.execute(targs); + try { + return !out.toString(DEFAULT_ENCODING).contains("\"lastRunFinishedTime\":\"\""); + } catch (Exception ex) { + // do nothing + } + return false; + }, 1000, 10000); + } } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 89f068cdedf..4dc06d8eeb9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; -import java.net.URI; import java.util.Map; import java.util.Arrays; import java.util.HashSet; @@ -35,12 +34,12 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.TrashPolicy; import org.apache.hadoop.hdds.cli.GenericCli; @@ -82,6 +81,7 @@ import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -111,6 +111,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; @@ -135,12 +136,13 @@ public class TestOzoneShellHA { LoggerFactory.getLogger(TestOzoneShellHA.class); private static final String DEFAULT_ENCODING = UTF_8.name(); - - private static File baseDir; + @TempDir + private static java.nio.file.Path path; + @TempDir + private static File kmsDir; private static File testFile; private static String testFilePathString; private static MiniOzoneHAClusterImpl cluster = null; - private static File testDir; private static MiniKMS miniKMS; private static OzoneClient client; private OzoneShell ozoneShell = null; @@ -154,6 +156,8 @@ public class TestOzoneShellHA { private static String omServiceId; private static int numOfOMs; + private static OzoneConfiguration ozoneConfiguration; + /** * Create a MiniOzoneCluster for testing with using distributed Ozone * handler type. 
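For context on the @TempDir fields introduced above: JUnit 5 creates the directory before it is first used and deletes it after the test class finishes, which is what lets the manual baseDir/testDir bookkeeping and the FileUtil.fullyDelete calls in shutdown() be dropped. A small self-contained sketch of the idiom, with hypothetical class and file names:

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import static org.junit.jupiter.api.Assertions.assertTrue;

class TempDirIdiomSketch {        // hypothetical name, not part of the patch

  @TempDir
  static Path sharedDir;          // injected by JUnit, cleaned up automatically

  @Test
  void writesIntoManagedTempDir() throws Exception {
    File testFile = sharedDir.resolve("testFile").toFile();
    Files.write(testFile.toPath(), "hello".getBytes(StandardCharsets.UTF_8));
    assertTrue(testFile.exists());  // no FileUtil.fullyDelete needed afterwards
  }
}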
@@ -163,26 +167,19 @@ public class TestOzoneShellHA { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); startKMS(); startCluster(conf); } protected static void startKMS() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestOzoneShellHA.class.getSimpleName()); - File kmsDir = new File(testDir, UUID.randomUUID().toString()); - assertTrue(kmsDir.mkdirs()); MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder(); miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build(); miniKMS.start(); } protected static void startCluster(OzoneConfiguration conf) throws Exception { - String path = GenericTestUtils.getTempPath( - TestOzoneShellHA.class.getSimpleName()); - baseDir = new File(path); - baseDir.mkdirs(); testFilePathString = path + OZONE_URI_DELIMITER + "testFile"; testFile = new File(testFilePathString); @@ -197,6 +194,8 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { getKeyProviderURI(miniKMS)); conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 10); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, 1); + ozoneConfiguration = conf; MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) @@ -219,14 +218,6 @@ public static void shutdown() { if (miniKMS != null) { miniKMS.stop(); } - - if (baseDir != null) { - FileUtil.fullyDelete(baseDir, true); - } - - if (testDir != null) { - FileUtil.fullyDelete(testDir, true); - } } @BeforeEach @@ -590,6 +581,7 @@ public void testAdminCmdListOpenFiles() final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); + clientConf.setBoolean("ozone.client.hbase.enhancements.allowed", true); clientConf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); FileSystem fs = FileSystem.get(clientConf); @@ -709,6 +701,7 @@ public void testAdminCmdListOpenFilesWithDeletedKeys() final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); + clientConf.setBoolean("ozone.client.hbase.enhancements.allowed", true); clientConf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); FileSystem fs = FileSystem.get(clientConf); @@ -825,6 +818,7 @@ public void testAdminCmdListOpenFilesWithOverwrittenKeys() final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf); + clientConf.setBoolean("ozone.client.hbase.enhancements.allowed", true); clientConf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); FileSystem fs = FileSystem.get(clientConf); @@ -953,6 +947,33 @@ private String getStdOut() throws UnsupportedEncodingException { return res; } + @Test + public void testOzoneAdminCmdListAllContainer() + throws UnsupportedEncodingException { + String[] args = new String[] {"container", "create", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + for (int i = 0; i < 2; i++) { + execute(ozoneAdminShell, args); + } + + String[] args1 = new String[] {"container", "list", "-c", "10", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args1); + //results will be capped at the maximum allowed count + 
assertEquals(1, getNumOfContainers()); + + String[] args2 = new String[] {"container", "list", "-a", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args2); + //Lists all containers + assertNotEquals(1, getNumOfContainers()); + } + + private int getNumOfContainers() + throws UnsupportedEncodingException { + return out.toString(DEFAULT_ENCODING).split("\"containerID\" :").length - 1; + } + /** * Helper function to retrieve Ozone client configuration for trash testing. * @param hostPrefix Scheme + Authority. e.g. ofs://om-service-test1 @@ -1144,8 +1165,6 @@ public void testListBucket() throws Exception { getClientConfForOFS(hostPrefix, cluster.getConf()); int pageSize = 20; clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize); - URI uri = FileSystem.getDefaultUri(clientConf); - clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); OzoneFsShell shell = new OzoneFsShell(clientConf); String volName = "testlistbucket"; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java index a0ad35500ca..b1dcbc0576e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java @@ -38,6 +38,8 @@ public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); startKMS(); startCluster(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index 5d647507141..09770b097f8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -21,7 +21,6 @@ import com.google.common.base.Strings; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.retry.RetryInvocationHandler; @@ -44,6 +43,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -54,6 +54,7 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import java.nio.file.Path; import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; @@ -91,7 +92,8 @@ public class TestOzoneTenantShell { * Set the timeout for every test. 
*/ - private static File baseDir; + @TempDir + private static Path path; private static File testFile; private static final File AUDIT_LOG_FILE = new File("audit.log"); @@ -137,11 +139,6 @@ public static void init() throws Exception { conf.setBoolean(OZONE_OM_TENANT_DEV_SKIP_RANGER, true); } - String path = GenericTestUtils.getTempPath( - TestOzoneTenantShell.class.getSimpleName()); - baseDir = new File(path); - baseDir.mkdirs(); - testFile = new File(path + OzoneConsts.OZONE_URI_DELIMITER + "testFile"); testFile.getParentFile().mkdirs(); testFile.createNewFile(); @@ -169,10 +166,6 @@ public static void shutdown() { cluster.shutdown(); } - if (baseDir != null) { - FileUtil.fullyDelete(baseDir, true); - } - if (AUDIT_LOG_FILE.exists()) { AUDIT_LOG_FILE.delete(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java index 7c7f2b77ec5..cc508782a3d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java @@ -67,7 +67,8 @@ public class TestReconfigShell { */ @BeforeAll public static void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + ozoneAdmin = new OzoneAdmin(); + OzoneConfiguration conf = ozoneAdmin.getOzoneConf(); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); String omServiceId = UUID.randomUUID().toString(); cluster = MiniOzoneCluster.newHABuilder(conf) @@ -77,7 +78,6 @@ public static void setup() throws Exception { .setNumDatanodes(DATANODE_COUNT) .build(); cluster.waitForClusterToBeReady(); - ozoneAdmin = new OzoneAdmin(cluster.getConf()); ozoneManager = cluster.getOzoneManager(); storageContainerManager = cluster.getStorageContainerManager(); datanodeServices = cluster.getHddsDatanodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java index d3d7c7766e7..cde7583956c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java @@ -26,9 +26,10 @@ import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.ratis.protocol.RaftPeer; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.io.IOException; import java.util.ArrayList; @@ -43,6 +44,7 @@ /** * Test transferLeadership with SCM HA setup. 
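The import switch above (BeforeEach/AfterEach to BeforeAll/AfterAll plus TestInstance) goes with the per-class lifecycle shown in the following hunks: with @TestInstance(Lifecycle.PER_CLASS), @BeforeAll and @AfterAll may be instance methods, so one expensive fixture (here the MiniOzoneCluster) is built once and shared by every test in the class. A self-contained sketch of the pattern with hypothetical names:

import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;

@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class PerClassLifecycleSketch {     // hypothetical name, not part of the patch

  private String sharedFixture;     // stands in for the shared cluster

  @BeforeAll
  void init() {                     // instance method: only legal with PER_CLASS
    sharedFixture = "expensive-to-create";
  }

  @Test
  void testSeesSharedFixture() {
    assertNotNull(sharedFixture);
  }

  @AfterAll
  void shutdown() {                 // runs once, after all tests
    sharedFixture = null;
  }
}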
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class TestTransferLeadershipShell { private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; @@ -50,6 +52,7 @@ public class TestTransferLeadershipShell { private String scmServiceId; private int numOfOMs = 3; private int numOfSCMs = 3; + private OzoneAdmin ozoneAdmin; private static final long SNAPSHOT_THRESHOLD = 5; @@ -58,9 +61,10 @@ public class TestTransferLeadershipShell { * * @throws IOException Exception */ - @BeforeEach + @BeforeAll public void init() throws Exception { - conf = new OzoneConfiguration(); + ozoneAdmin = new OzoneAdmin(); + conf = ozoneAdmin.getOzoneConf(); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, @@ -78,7 +82,7 @@ public void init() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterEach + @AfterAll public void shutdown() { if (cluster != null) { cluster.shutdown(); @@ -93,7 +97,6 @@ public void testOmTransfer() throws Exception { omList.remove(oldLeader); OzoneManager newLeader = omList.get(0); cluster.waitForClusterToBeReady(); - OzoneAdmin ozoneAdmin = new OzoneAdmin(conf); String[] args1 = {"om", "transfer", "-n", newLeader.getOMNodeId()}; ozoneAdmin.execute(args1); Thread.sleep(3000); @@ -117,7 +120,6 @@ public void testScmTransfer() throws Exception { scmList.remove(oldLeader); StorageContainerManager newLeader = scmList.get(0); - OzoneAdmin ozoneAdmin = new OzoneAdmin(conf); String[] args1 = {"scm", "transfer", "-n", newLeader.getScmId()}; ozoneAdmin.execute(args1); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index 71f1b682d0f..861127916c2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -29,9 +29,11 @@ import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -52,9 +54,10 @@ import org.apache.hadoop.tools.DistCpOptions; import org.apache.hadoop.tools.SimpleCopyListing; import org.apache.hadoop.tools.mapred.CopyMapper; -import org.apache.hadoop.tools.util.DistCpTestUtils; +import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.functional.RemoteIterators; +import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -534,8 +537,7 @@ public void testLargeFilesFromRemote() throws Exception { public void testSetJobId() throws Exception { describe("check jobId is set in the conf"); remoteFS.create(new Path(remoteDir, "file1")).close(); - DistCpTestUtils - .assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(), + assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(), localDir.toString(), getDefaultCLIOptionsOrNull(), conf); assertThat(conf.get(CONF_LABEL_DISTCP_JOB_ID)) .withFailMessage("DistCp job id isn't set") @@ -719,7 +721,7 @@ public void testDistCpWithIterator() throws Exception { 
GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG); String options = "-useiterator -update -delete" + getDefaultCLIOptions(); - DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), dest.toString(), options, conf); // Check the target listing was also done using iterator. @@ -864,7 +866,7 @@ public void testDistCpWithFile() throws Exception { verifyPathExists(remoteFS, "", source); verifyPathExists(localFS, "", localDir); - DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), dest.toString(), getDefaultCLIOptionsOrNull(), conf); assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) @@ -889,7 +891,7 @@ public void testDistCpWithUpdateExistFile() throws Exception { verifyPathExists(remoteFS, "", source); verifyPathExists(localFS, "", dest); - DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), dest.toString(), "-delete -update" + getDefaultCLIOptions(), conf); assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) @@ -1015,4 +1017,37 @@ private void verifySkipAndCopyCounter(Job job, .withFailMessage("Mismatch in SKIP counter value") .isEqualTo(skipExpectedValue); } + + /** + * Runs distcp from src to dst, preserving XAttrs. Asserts the + * expected exit code. + * + * @param exitCode expected exit code + * @param src distcp src path + * @param dst distcp destination + * @param options distcp command line options + * @param conf Configuration to use + * @throws Exception if there is any error + */ + public static void assertRunDistCp(int exitCode, String src, String dst, + String options, Configuration conf) + throws Exception { + assertRunDistCp(exitCode, src, dst, + options == null ? new String[0] : options.trim().split(" "), conf); + } + + private static void assertRunDistCp(int exitCode, String src, String dst, + String[] options, Configuration conf) + throws Exception { + DistCp distCp = new DistCp(conf, null); + String[] optsArr = new String[options.length + 2]; + System.arraycopy(options, 0, optsArr, 0, options.length); + optsArr[optsArr.length - 2] = src; + optsArr[optsArr.length - 1] = dst; + + Assertions.assertThat(ToolRunner.run(conf, distCp, optsArr)) + .describedAs("Exit code of distcp %s", + Arrays.stream(optsArr).collect(Collectors.joining(" "))) + .isEqualTo(exitCode); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java new file mode 100644 index 00000000000..4908ecabf2e --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.utils; + +import com.google.common.base.Preconditions; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; + +/** + * A filter input stream implementation that exposes a range of the underlying input stream. + */ +public class InputSubstream extends FilterInputStream { + private static final int MAX_SKIPS = 100; + private long currentPosition; + private final long requestedSkipOffset; + private final long requestedLength; + private long markedPosition = 0; + + public InputSubstream(InputStream in, long skip, long length) { + super(in); + Preconditions.checkNotNull(in); + this.currentPosition = 0; + this.requestedSkipOffset = skip; + this.requestedLength = length; + } + + @Override + public int read() throws IOException { + byte[] b = new byte[1]; + int bytesRead = read(b, 0, 1); + + if (bytesRead == -1) { + return bytesRead; + } + return b[0]; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int count = 0; + while (currentPosition < requestedSkipOffset) { + long skippedBytes = super.skip(requestedSkipOffset - currentPosition); + if (skippedBytes == 0) { + count++; + if (count > MAX_SKIPS) { + throw new IOException( + "Unable to position the currentPosition from " + + currentPosition + " to " + + requestedSkipOffset); + } + } + currentPosition += skippedBytes; + } + + long bytesRemaining = + (requestedLength + requestedSkipOffset) - currentPosition; + if (bytesRemaining <= 0) { + return -1; + } + + len = (int) Math.min(len, bytesRemaining); + int bytesRead = super.read(b, off, len); + currentPosition += bytesRead; + + return bytesRead; + } + + @Override + public synchronized void mark(int readlimit) { + markedPosition = currentPosition; + super.mark(readlimit); + } + + @Override + public synchronized void reset() throws IOException { + currentPosition = markedPosition; + super.reset(); + } + + @Override + public void close() throws IOException { + // No-op operation since we don't want to close the underlying stream + // when the susbtream has been read + } + + @Override + public int available() throws IOException { + long bytesRemaining; + if (currentPosition < requestedSkipOffset) { + bytesRemaining = requestedLength; + } else { + bytesRemaining = + (requestedLength + requestedSkipOffset) - currentPosition; + } + + return (int) Math.min(bytesRemaining, super.available()); + } +} diff --git a/hadoop-ozone/integration-test/src/test/resources/log4j.properties b/hadoop-ozone/integration-test/src/test/resources/log4j.properties index 564b729d5fc..c732a15c48a 100644 --- a/hadoop-ozone/integration-test/src/test/resources/log4j.properties +++ b/hadoop-ozone/integration-test/src/test/resources/log4j.properties @@ -21,3 +21,4 @@ log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR log4j.logger.org.apache.hadoop.hdds.utils.db.managed=TRACE log4j.logger.org.apache.hadoop.hdds.utils.db.CodecBuffer=DEBUG +log4j.logger.org.apache.hadoop.ozone.client.OzoneClientFactory=DEBUG diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml 
b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 779ed2b785c..5ea2eb89dfa 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -84,7 +84,7 @@ hdds.container.ratis.log.appender.queue.byte-limit - 8MB + 32MB ozone.om.ratis.log.appender.queue.byte-limit diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 18d9584fbc8..2e68deeeb3b 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -20,16 +20,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-interface-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Client interface Apache Ozone Client Interface jar true + true @@ -48,7 +49,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop.thirdparty - hadoop-shaded-protobuf_3_7 + hadoop-shaded-protobuf_3_25 @@ -187,13 +188,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 9e0f729be40..92c2b6b4cc5 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -97,8 +97,9 @@ enum Type { ListMultipartUploads = 82; - ListTrash = 91; - RecoverTrash = 92; + // Not used anymore due to HDDS-11251 + ListTrash = 91; // [deprecated = true] + RecoverTrash = 92; // [deprecated = true] RevokeS3Secret = 93; @@ -149,7 +150,12 @@ enum Type { RenameSnapshot = 131; ListOpenFiles = 132; QuotaRepair = 133; - GetServerDefaults = 134; + GetQuotaRepairStatus = 135; + StartQuotaRepair = 136; + SnapshotMoveTableKeys = 137; + PutObjectTagging = 140; + GetObjectTagging = 141; + DeleteObjectTagging = 142; } enum SafeMode { @@ -233,8 +239,9 @@ message OMRequest { optional UpdateGetS3SecretRequest updateGetS3SecretRequest = 82; optional ListMultipartUploadsRequest listMultipartUploadsRequest = 83; - optional ListTrashRequest listTrashRequest = 91; - optional RecoverTrashRequest RecoverTrashRequest = 92; + // Not used anymore due to HDDS-11251 + optional ListTrashRequest listTrashRequest = 91 [deprecated = true]; + optional RecoverTrashRequest RecoverTrashRequest = 92 [deprecated = true]; optional RevokeS3SecretRequest RevokeS3SecretRequest = 93; @@ -288,7 +295,14 @@ message OMRequest { optional RenameSnapshotRequest RenameSnapshotRequest = 129; optional ListOpenFilesRequest ListOpenFilesRequest = 130; optional QuotaRepairRequest QuotaRepairRequest = 131; - optional ServerDefaultsRequest ServerDefaultsRequest = 132; + + optional GetQuotaRepairStatusRequest GetQuotaRepairStatusRequest = 133; + optional StartQuotaRepairRequest StartQuotaRepairRequest = 134; + optional SnapshotMoveTableKeysRequest SnapshotMoveTableKeysRequest = 135; + + optional GetObjectTaggingRequest getObjectTaggingRequest = 140; + optional PutObjectTaggingRequest putObjectTaggingRequest = 141; + optional DeleteObjectTaggingRequest deleteObjectTaggingRequest = 142; } message OMResponse { @@ -362,8 +376,10 @@ message OMResponse { optional ListMultipartUploadsResponse listMultipartUploadsResponse = 82; - optional ListTrashResponse listTrashResponse = 91; - optional RecoverTrashResponse RecoverTrashResponse = 92; + // Not used 
anymore due to HDDS-11251 + optional ListTrashResponse listTrashResponse = 91 [deprecated = true]; + optional RecoverTrashResponse RecoverTrashResponse = 92 [deprecated = true]; + optional PurgePathsResponse purgePathsResponse = 93 [deprecated = true]; optional PurgeDirectoriesResponse purgeDirectoriesResponse = 108; @@ -414,7 +430,12 @@ message OMResponse { optional RenameSnapshotResponse RenameSnapshotResponse = 132; optional ListOpenFilesResponse ListOpenFilesResponse = 133; optional QuotaRepairResponse QuotaRepairResponse = 134; - optional ServerDefaultsResponse ServerDefaultsResponse = 135; + optional GetQuotaRepairStatusResponse GetQuotaRepairStatusResponse = 136; + optional StartQuotaRepairResponse StartQuotaRepairResponse = 137; + + optional GetObjectTaggingResponse getObjectTaggingResponse = 140; + optional PutObjectTaggingResponse putObjectTaggingResponse = 141; + optional DeleteObjectTaggingResponse deleteObjectTaggingResponse = 142; } enum Status { @@ -548,33 +569,39 @@ enum Status { /** This command acts as a list command for deleted keys that are still present in the deleted table on Ozone Manager. + Not used anymore due to HDDS-11251 */ message ListTrashRequest { - required string volumeName = 1; - required string bucketName = 2; - optional string startKeyName = 3; - optional string keyPrefix = 4; - optional int32 maxKeys = 5; + // option deprecated = true; + required string volumeName = 1 [deprecated = true]; + required string bucketName = 2 [deprecated = true]; + optional string startKeyName = 3 [deprecated = true]; + optional string keyPrefix = 4 [deprecated = true]; + optional int32 maxKeys = 5 [deprecated = true]; } message ListTrashResponse { - repeated RepeatedKeyInfo deletedKeys = 1; + // option deprecated = true; + repeated RepeatedKeyInfo deletedKeys = 1 [deprecated = true]; } /** This command acts as a recover command for deleted keys that are still in deleted table on Ozone Manager. + Not used anymore due to HDDS-11251 */ message RecoverTrashRequest { - required string volumeName = 1; - required string bucketName = 2; - required string keyName = 3; - required string destinationBucket = 4; + // option deprecated = true; + required string volumeName = 1 [deprecated = true]; + required string bucketName = 2 [deprecated = true]; + required string keyName = 3 [deprecated = true]; + required string destinationBucket = 4 [deprecated = true]; } message RecoverTrashResponse { - required bool response = 1; + // option deprecated = true; + required bool response = 1 [deprecated = true]; } message VolumeInfo { @@ -870,6 +897,7 @@ message SnapshotInfo { optional uint64 exclusiveReplicatedSize = 18; // note: shared sizes can be calculated from: referenced - exclusive optional bool deepCleanedDeletedDir = 19; + optional bytes lastTransactionInfo = 20; } message SnapshotDiffJobProto { @@ -1362,6 +1390,8 @@ message PurgeKeysRequest { // if set, will purge keys in a snapshot DB instead of active DB optional string snapshotTableKey = 2; repeated SnapshotMoveKeyInfos keysToUpdate = 3; + // previous snapshotID can also be null & this field would be absent in older requests. + optional NullableUUID expectedPreviousSnapshotID = 4; } message PurgeKeysResponse { @@ -1384,6 +1414,12 @@ message PurgePathsResponse { message PurgeDirectoriesRequest { repeated PurgePathRequest deletedPath = 1; optional string snapshotTableKey = 2; + // previous snapshotID can also be null & this field would be absent in older requests. 
+ optional NullableUUID expectedPreviousSnapshotID = 3; +} + +message NullableUUID { + optional hadoop.hdds.UUID uuid = 1; } message PurgeDirectoriesResponse { @@ -1433,7 +1469,8 @@ message OMTokenProto { optional string accessKeyId = 12; optional string signature = 13; optional string strToSign = 14; - optional string omServiceId = 15; + optional string omServiceId = 15 [deprecated = true]; + optional string secretKeyId = 16; } message SecretKeyProto { @@ -1613,6 +1650,7 @@ message ServiceInfo { repeated ServicePort servicePorts = 3; optional OMRoleInfo omRole = 4; optional int32 OMVersion = 5 [default = 0]; + optional FsServerDefaultsProto serverDefaults = 6; } message MultipartInfoInitiateRequest { @@ -1964,6 +2002,13 @@ message SnapshotMoveDeletedKeysRequest { repeated string deletedDirsToMove = 5; } +message SnapshotMoveTableKeysRequest { + optional hadoop.hdds.UUID fromSnapshotID = 1; + repeated SnapshotMoveKeyInfos deletedKeys = 2; + repeated SnapshotMoveKeyInfos deletedDirs = 3; + repeated hadoop.hdds.KeyValue renamedKeys = 4; +} + message SnapshotMoveKeyInfos { optional string key = 1; repeated KeyInfo keyInfos = 2; @@ -2205,15 +2250,19 @@ message BucketQuotaCount { message QuotaRepairResponse { } -message ServerDefaultsRequest { -} - message FsServerDefaultsProto { optional string keyProviderUri = 1; } -message ServerDefaultsResponse { - required FsServerDefaultsProto serverDefaults = 1; +message GetQuotaRepairStatusRequest { +} +message GetQuotaRepairStatusResponse { + optional string status = 1; +} +message StartQuotaRepairRequest { + repeated string buckets = 1; +} +message StartQuotaRepairResponse { } message OMLockDetailsProto { @@ -2223,6 +2272,28 @@ message OMLockDetailsProto { optional uint64 writeLockNanos = 4; } +message PutObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message PutObjectTaggingResponse { +} + +message GetObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message GetObjectTaggingResponse { + repeated hadoop.hdds.KeyValue tags = 1; +} + +message DeleteObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message DeleteObjectTaggingResponse { +} + /** The OM service that takes care of Ozone namespace. 
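The Put/Get/DeleteObjectTagging messages added above carry S3 object-tagging calls into OM. With the AWS SDK v1 already used by the new integration tests in this patch, the client-side round trip would look roughly like the sketch below; the bucket, key and tag values are hypothetical, and the S3-gateway-to-OM mapping is implied by the message names rather than shown here:

import java.util.Collections;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectTaggingRequest;
import com.amazonaws.services.s3.model.GetObjectTaggingRequest;
import com.amazonaws.services.s3.model.GetObjectTaggingResult;
import com.amazonaws.services.s3.model.ObjectTagging;
import com.amazonaws.services.s3.model.SetObjectTaggingRequest;
import com.amazonaws.services.s3.model.Tag;

final class ObjectTaggingRoundTripSketch {   // hypothetical, for illustration only

  static void roundTrip(AmazonS3 s3, String bucket, String key) {
    List<Tag> tags = Collections.singletonList(new Tag("env", "test"));

    // S3 PutObjectTagging, presumably served via the new PutObjectTaggingRequest on OM.
    s3.setObjectTagging(new SetObjectTaggingRequest(bucket, key, new ObjectTagging(tags)));

    // S3 GetObjectTagging returns the stored tag set.
    GetObjectTaggingResult result = s3.getObjectTagging(new GetObjectTaggingRequest(bucket, key));
    System.out.println("tags: " + result.getTagSet());

    // S3 DeleteObjectTagging removes all tags from the key.
    s3.deleteObjectTagging(new DeleteObjectTaggingRequest(bucket, key));
  }
}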
*/ diff --git a/hadoop-ozone/interface-client/src/main/resources/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock index b8f5c395bae..0b28e0953c4 100644 --- a/hadoop-ozone/interface-client/src/main/resources/proto.lock +++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock @@ -3270,6 +3270,12 @@ "name": "exclusiveReplicatedSize", "type": "uint64", "optional": true + }, + { + "id": 19, + "name": "deepCleanedDeletedDir", + "type": "bool", + "optional": true } ] }, @@ -4137,6 +4143,12 @@ "name": "ecReplicationConfig", "type": "hadoop.hdds.ECReplicationConfig", "optional": true + }, + { + "id": 8, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -4687,6 +4699,29 @@ } ] }, + { + "name": "DeleteKeyError", + "fields": [ + { + "id": 1, + "name": "key", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "errorCode", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "errorMsg", + "type": "string", + "optional": true + } + ] + }, { "name": "DeleteKeysResponse", "fields": [ @@ -4701,6 +4736,12 @@ "name": "status", "type": "bool", "optional": true + }, + { + "id": 3, + "name": "errors", + "type": "DeleteKeyError", + "is_repeated": true } ] }, @@ -5636,6 +5677,12 @@ "name": "partName", "type": "string", "optional": true + }, + { + "id": 2, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -5699,6 +5746,12 @@ "name": "partName", "type": "string", "required": true + }, + { + "id": 3, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -5965,6 +6018,12 @@ "name": "size", "type": "uint64", "required": true + }, + { + "id": 5, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -6671,7 +6730,13 @@ "id": 2, "name": "updatedSnapshotDBKey", "type": "string", - "is_repeated": true + "is_repeated": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] } ] }, @@ -6682,6 +6747,36 @@ "id": 1, "name": "snapshotProperty", "type": "SnapshotProperty", + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 2, + "name": "snapshotKey", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "snapshotSize", + "type": "SnapshotSize", + "optional": true + }, + { + "id": 4, + "name": "deepCleanedDeletedDir", + "type": "bool", + "optional": true + }, + { + "id": 5, + "name": "deepCleanedDeletedKey", + "type": "bool", "optional": true } ] @@ -6693,18 +6788,53 @@ "id": 1, "name": "snapshotKey", "type": "string", - "optional": true + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] }, { "id": 2, "name": "exclusiveSize", "type": "uint64", - "optional": true + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] }, { "id": 3, "name": "exclusiveReplicatedSize", "type": "uint64", + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + } + ] + }, + { + "name": "SnapshotSize", + "fields": [ + { + "id": 1, + "name": "exclusiveSize", + "type": "uint64", + "optional": true + }, + { + "id": 2, + "name": "exclusiveReplicatedSize", + "type": "uint64", "optional": true } ] diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index ab1cc275ac1..cd2e1e34783 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -20,10 +20,10 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-interface-storage - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 
Apache Ozone Storage Interface Apache Ozone Storage Interface jar diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index fb34d19a8bb..ae57c18354d 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -116,6 +116,22 @@ public interface OMMetadataManager extends DBStoreHAManager { */ String getBucketKey(String volume, String bucket); + /** + * Given a volume and bucket, return the corresponding DB key prefix. + * + * @param volume - Volume name + * @param bucket - Bucket name + */ + String getBucketKeyPrefix(String volume, String bucket); + + /** + * Given a volume and bucket, return the corresponding DB key prefix for FSO buckets. + * + * @param volume - Volume name + * @param bucket - Bucket name + */ + String getBucketKeyPrefixFSO(String volume, String bucket) throws IOException; + /** * Given a volume, bucket and a key, return the corresponding DB key. * @@ -262,24 +278,6 @@ ListKeysResult listKeys(String volumeName, int maxKeys) throws IOException; - /** - * List trash allows the user to list the keys that were marked as deleted, - * but not actually deleted by Ozone Manager. This allows a user to recover - * keys within a configurable window. - * @param volumeName - The volume name, which can also be a wild card - * using '*'. - * @param bucketName - The bucket name, which can also be a wild card - * using '*'. - * @param startKeyName - List keys from a specific key name. - * @param keyPrefix - List keys using a specific prefix. - * @param maxKeys - The number of keys to be returned. This must be below - * the cluster level set by admins. - * @return The list of keys that are deleted from the deleted table. - * @throws IOException - */ - List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException; - /** * Returns snapshot info for volume/bucket snapshot path. * @param volumeName volume name @@ -304,18 +302,6 @@ ListSnapshotResponse listSnapshot( String volumeName, String bucketName, String snapshotPrefix, String prevSnapshot, int maxListResult) throws IOException; - /** - * Recover trash allows the user to recover the keys - * that were marked as deleted, but not actually deleted by Ozone Manager. - * @param volumeName - The volume name. - * @param bucketName - The bucket name. - * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. - * @return The result of recovering operation is success or not. - */ - boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException; - /** * Returns a list of volumes owned by a given user; if user is null, returns * all volumes. @@ -661,7 +647,7 @@ String getMultipartKey(long volumeId, long bucketId, long getBucketId(String volume, String bucket) throws IOException; /** - * Returns List<{@link BlockGroup}> for a key in the deletedTable. + * Returns {@code List} for a key in the deletedTable. 
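The new getBucketKeyPrefix/getBucketKeyPrefixFSO methods above give callers one prefix under which every DB key of a bucket lives. A minimal sketch of the intended use, assuming an OMMetadataManager instance from the surrounding context and hypothetical volume/bucket names:

import org.apache.hadoop.ozone.om.OMMetadataManager;

final class BucketPrefixSketch {    // hypothetical helper, for illustration only

  static boolean belongsToBucket(OMMetadataManager omMetadataManager, String dbKey) {
    // Name-based prefix for OBS/legacy layouts (assumed to look like "/vol1/bucket1/");
    // FSO buckets would use getBucketKeyPrefixFSO for the id-based equivalent.
    String prefix = omMetadataManager.getBucketKeyPrefix("vol1", "bucket1");
    return dbKey.startsWith(prefix);
  }
}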
* @param deletedKey - key to be purged from the deletedTable * @return {@link BlockGroup} */ diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java index ba54a44ac79..84203b1f65a 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java @@ -43,10 +43,15 @@ private TokenIdentifierCodec() { } @Override - public byte[] toPersistedFormat(OzoneTokenIdentifier object) { + public Class getTypeClass() { + return OzoneTokenIdentifier.class; + } + + @Override + public byte[] toPersistedFormat(OzoneTokenIdentifier object) throws IOException { Preconditions .checkNotNull(object, "Null object can't be converted to byte array."); - return object.toUniqueSerializedKey(); + return object.toProtoBuf().toByteArray(); } @Override @@ -55,11 +60,11 @@ public OzoneTokenIdentifier fromPersistedFormat(byte[] rawData) Preconditions.checkNotNull(rawData, "Null byte array can't converted to real object."); try { - OzoneTokenIdentifier object = OzoneTokenIdentifier.newInstance(); - return object.fromUniqueSerializedKey(rawData); + return OzoneTokenIdentifier.readProtoBuf(rawData); } catch (IOException ex) { try { - return OzoneTokenIdentifier.readProtoBuf(rawData); + OzoneTokenIdentifier object = OzoneTokenIdentifier.newInstance(); + return object.fromUniqueSerializedKey(rawData); } catch (InvalidProtocolBufferException e) { throw new IllegalArgumentException( "Can't encode the the raw data from the byte array", e); diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index 30fe6d69b76..a2fdfb99c54 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -42,7 +42,8 @@ public final class OmPrefixInfo extends WithObjectID implements CopyObject CODEC = new DelegatedCodec<>( Proto2Codec.get(PersistedPrefixInfo.getDefaultInstance()), OmPrefixInfo::getFromProtobuf, - OmPrefixInfo::getProtobuf); + OmPrefixInfo::getProtobuf, + OmPrefixInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index ae427727def..8e78814eb6b 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -20,15 +20,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-manager - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Manager Server Apache Ozone Manager Server jar + false @@ -474,12 +475,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - org.codehaus.mojo + dev.aspectj aspectj-maven-plugin ${aspectj-plugin.version} 1.8 1.8 + ${project.build.directory}/aspectj-build diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index 081477adf4d..5fd9fd6d595 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -109,7 +109,11 @@ public enum OMAction implements AuditAction { UPGRADE_CANCEL, UPGRADE_FINALIZE, - LIST_OPEN_FILES; + LIST_OPEN_FILES, + + GET_OBJECT_TAGGING, + PUT_OBJECT_TAGGING, + DELETE_OBJECT_TAGGING; @Override public String getAction() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 68429c36d08..e4174efcfcc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -43,7 +43,7 @@ * BucketManager uses MetadataDB to store bucket level information. * Keys used in BucketManager for storing data into MetadataDB * for BucketInfo: - * {volume/bucket} -> bucketInfo + * {volume/bucket} -> bucketInfo */ public class BucketManagerImpl implements BucketManager { private static final Logger LOG = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java index a83304ade45..bb682508524 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java @@ -63,7 +63,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_WORKERGROUP_SIZE_KEY; /** - * Separated network server for gRPC transport OzoneManagerService s3g->OM. + * Separated network server for gRPC transport OzoneManagerService s3g->OM. */ public class GrpcOzoneManagerServer { private static final Logger LOG = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 7a3312c0685..9f6d8b81c10 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -18,6 +18,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -28,7 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.service.DirectoryDeletingService; import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; @@ -36,7 +37,9 @@ import java.io.IOException; import java.time.Duration; +import java.util.ArrayList; import java.util.List; +import java.util.Map; /** * Handles key level commands. @@ -47,7 +50,6 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl { * Start key manager. 
* * @param configuration - * @throws IOException */ void start(OzoneConfiguration configuration); @@ -107,24 +109,6 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException; - /** - * List trash allows the user to list the keys that were marked as deleted, - * but not actually deleted by Ozone Manager. This allows a user to recover - * keys within a configurable window. - * @param volumeName - The volume name, which can also be a wild card - * using '*'. - * @param bucketName - The bucket name, which can also be a wild card - * using '*'. - * @param startKeyName - List keys from a specific key name. - * @param keyPrefix - List keys using a specific prefix. - * @param maxKeys - The number of keys to be returned. This must be below - * the cluster level set by admins. - * @return The list of keys that are deleted from the deleted table. - * @throws IOException - */ - List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException; - /** * Returns a PendingKeysDeletion. It has a list of pending deletion key info * that ups to the given count.Each entry is a {@link BlockGroup}, which @@ -138,6 +122,29 @@ List listTrash(String volumeName, String bucketName, */ PendingKeysDeletion getPendingDeletionKeys(int count) throws IOException; + /** + * Returns a list rename entries from the snapshotRenamedTable. + * + * @param size max number of keys to return. + * @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the + * underlying metadataManager. + * @throws IOException + */ + List> getRenamesKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException; + + + /** + * Returns a list deleted entries from the deletedTable. + * + * @param size max number of keys to return. + * @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the + * underlying metadataManager. + * @throws IOException + */ + List>> getDeletedKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException; + /** * Returns the names of up to {@code count} open keys whose age is * greater than or equal to {@code expireThreshold}. @@ -171,6 +178,17 @@ ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, int count, List getExpiredMultipartUploads( Duration expireThreshold, int maxParts) throws IOException; + /** + * Look up an existing key from the OM table and retrieve the tags from + * the key info. + * + * @param args the args of the key provided by client. + * @param bucket the resolved parent bucket of the key. + * @return Map of the tag set associated with the key. + * @throws IOException + */ + Map getObjectTagging(OmKeyArgs args, ResolvedBucket bucket) throws IOException; + /** * Returns the metadataManager. * @return OMMetadataManager. @@ -235,6 +253,26 @@ OmMultipartUploadListParts listParts(String volumeName, String bucketName, */ Table.KeyValue getPendingDeletionDir() throws IOException; + /** + * Returns an iterator for pending deleted directories. 
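Editor's note: the bounded accessors added in this hunk (getRenamesKeyEntries, getDeletedKeyEntries, and the getDeletedDirEntries default method) each return at most `size` entries per call. A minimal caller-side sketch follows, assuming the reconstructed generic signature List<Table.KeyValue<String, OmKeyInfo>> getDeletedDirEntries(String, String, int); the class name and batch size are illustrative, not part of the patch.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

final class DeletedDirBatchSketch {
  private DeletedDirBatchSketch() { }

  /** Fetches one bounded batch of pending deleted directories for a bucket. */
  static List<Table.KeyValue<String, OmKeyInfo>> oneBatch(KeyManager keyManager,
      String volume, String bucket, int batchSize) throws IOException {
    // The default method caps the result at batchSize entries and closes the
    // underlying deletedDirTable iterator before returning.
    return keyManager.getDeletedDirEntries(volume, bucket, batchSize);
  }
}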
+ * @throws IOException + */ + TableIterator> getDeletedDirEntries( + String volume, String bucket) throws IOException; + + default List> getDeletedDirEntries(String volume, String bucket, int size) + throws IOException { + List> deletedDirEntries = new ArrayList<>(size); + try (TableIterator> iterator = + getDeletedDirEntries(volume, bucket)) { + while (deletedDirEntries.size() < size && iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + deletedDirEntries.add(Table.newKeyValue(kv.getKey(), kv.getValue())); + } + return deletedDirEntries; + } + } + /** * Returns all sub directories under the given parent directory. * @@ -262,7 +300,7 @@ List getPendingDeletionSubFiles(long volumeId, * Returns the instance of Directory Deleting Service. * @return Background service. */ - BackgroundService getDirDeletingService(); + DirectoryDeletingService getDirDeletingService(); /** * Returns the instance of Open Key Cleanup Service. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 2cb55135294..ccda21efc93 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -37,9 +37,15 @@ import java.util.Stack; import java.util.TreeMap; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import jakarta.annotation.Nonnull; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -56,6 +62,7 @@ import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.Table; @@ -70,6 +77,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -86,7 +94,6 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; @@ -98,18 +105,14 @@ import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import 
org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static java.lang.String.format; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; @@ -122,8 +125,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; @@ -144,12 +145,16 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT; import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; @@ -158,15 +163,11 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; +import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.util.Time.monotonicNow; -import jakarta.annotation.Nonnull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Implementation of keyManager. */ @@ -182,7 +183,6 @@ public class KeyManagerImpl implements KeyManager { private final ScmClient scmClient; private final OMMetadataManager metadataManager; private final long scmBlockSize; - private final int listTrashKeysMax; private final OzoneBlockTokenSecretManager secretManager; private final boolean grpcBlockTokenEnabled; @@ -193,7 +193,7 @@ public class KeyManagerImpl implements KeyManager { private final KeyProviderCryptoExtension kmsProvider; private final boolean enableFileSystemPaths; - private BackgroundService dirDeletingService; + private DirectoryDeletingService dirDeletingService; private final OMPerformanceMetrics metrics; private BackgroundService openKeyCleanupService; @@ -218,9 +218,6 @@ public KeyManagerImpl(OzoneManager om, ScmClient scmClient, this.grpcBlockTokenEnabled = conf.getBoolean( HDDS_BLOCK_TOKEN_ENABLED, HDDS_BLOCK_TOKEN_ENABLED_DEFAULT); - this.listTrashKeysMax = conf.getInt( - OZONE_CLIENT_LIST_TRASH_KEYS_MAX, - OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT); this.enableFileSystemPaths = conf.getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); @@ -235,6 +232,8 @@ public KeyManagerImpl(OzoneManager om, ScmClient scmClient, @Override public void start(OzoneConfiguration configuration) { + boolean isSnapshotDeepCleaningEnabled = configuration.getBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, + OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT); if (keyDeletingService == null) { long blockDeleteInterval = configuration.getTimeDuration( OZONE_BLOCK_DELETING_SERVICE_INTERVAL, @@ -246,7 +245,7 @@ public void start(OzoneConfiguration configuration) { TimeUnit.MILLISECONDS); keyDeletingService = new KeyDeletingService(ozoneManager, scmClient.getBlockClient(), this, blockDeleteInterval, - serviceTimeout, configuration); + serviceTimeout, configuration, isSnapshotDeepCleaningEnabled); keyDeletingService.start(); } @@ -260,8 +259,16 @@ public void start(OzoneConfiguration configuration) { OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - dirDeletingService = new DirectoryDeletingService(dirDeleteInterval, - TimeUnit.MILLISECONDS, serviceTimeout, ozoneManager, configuration); + int dirDeletingServiceCorePoolSize = + configuration.getInt(OZONE_THREAD_NUMBER_DIR_DELETION, + OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT); + if (dirDeletingServiceCorePoolSize <= 0) { + dirDeletingServiceCorePoolSize = 1; + } + dirDeletingService = + new DirectoryDeletingService(dirDeleteInterval, TimeUnit.MILLISECONDS, + serviceTimeout, ozoneManager, configuration, + dirDeletingServiceCorePoolSize); dirDeletingService.start(); } @@ -312,14 +319,14 @@ public void start(OzoneConfiguration configuration) { try { snapshotDeletingService = new SnapshotDeletingService( snapshotServiceInterval, snapshotServiceTimeout, - ozoneManager, scmClient.getBlockClient()); + ozoneManager); snapshotDeletingService.start(); } catch (IOException e) { LOG.error("Error starting Snapshot Deleting Service", e); } } - if (snapshotDirectoryCleaningService == null && + if (isSnapshotDeepCleaningEnabled && 
snapshotDirectoryCleaningService == null && ozoneManager.isFilesystemSnapshotEnabled()) { long dirDeleteInterval = configuration.getTimeDuration( OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, @@ -660,21 +667,6 @@ public ListKeysResult listKeys(String volumeName, String bucketName, return listKeysResult; } - @Override - public List listTrash(String volumeName, - String bucketName, String startKeyName, String keyPrefix, - int maxKeys) throws IOException { - - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkArgument(maxKeys <= listTrashKeysMax, - "The max keys limit specified is not less than the cluster " + - "allowed maximum limit."); - - return metadataManager.listTrash(volumeName, bucketName, - startKeyName, keyPrefix, maxKeys); - } - @Override public PendingKeysDeletion getPendingDeletionKeys(final int count) throws IOException { @@ -684,6 +676,60 @@ public PendingKeysDeletion getPendingDeletionKeys(final int count) .getPendingDeletionKeys(count, ozoneManager.getOmSnapshotManager()); } + private List> getTableEntries(String startKey, + TableIterator> tableIterator, + Function valueFunction, int size) throws IOException { + List> entries = new ArrayList<>(); + /* Seek to the start key if it not null. The next key in queue is ensured to start with the bucket + prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this. + */ + if (startKey != null) { + tableIterator.seek(startKey); + tableIterator.seekToFirst(); + } + int currentCount = 0; + while (tableIterator.hasNext() && currentCount < size) { + Table.KeyValue kv = tableIterator.next(); + if (kv != null) { + entries.add(Table.newKeyValue(kv.getKey(), valueFunction.apply(kv.getValue()))); + currentCount++; + } + } + return entries; + } + + private Optional getBucketPrefix(String volumeName, String bucketName, boolean isFSO) throws IOException { + // Bucket prefix would be empty if both volume & bucket is empty i.e. either null or "". + if (StringUtils.isEmpty(volumeName) && StringUtils.isEmpty(bucketName)) { + return Optional.empty(); + } else if (StringUtils.isEmpty(bucketName) || StringUtils.isEmpty(volumeName)) { + throw new IOException("One of volume : " + volumeName + ", bucket: " + bucketName + " is empty." + + " Either both should be empty or none of the arguments should be empty"); + } + return isFSO ? 
Optional.of(metadataManager.getBucketKeyPrefixFSO(volumeName, bucketName)) : + Optional.of(metadataManager.getBucketKeyPrefix(volumeName, bucketName)); + } + + @Override + public List> getRenamesKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + try (TableIterator> + renamedKeyIter = metadataManager.getSnapshotRenamedTable().iterator(bucketPrefix.orElse(""))) { + return getTableEntries(startKey, renamedKeyIter, Function.identity(), size); + } + } + + @Override + public List>> getDeletedKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + try (TableIterator> + delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) { + return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, size); + } + } + @Override public ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, int count, BucketLayout bucketLayout, Duration leaseThreshold) throws IOException { @@ -699,6 +745,16 @@ public List getExpiredMultipartUploads( maxParts); } + @Override + public Map getObjectTagging(OmKeyArgs args, ResolvedBucket bucket) throws IOException { + Preconditions.checkNotNull(args); + + OmKeyInfo value = captureLatencyNs(metrics.getLookupReadKeyInfoLatencyNs(), + () -> readKeyInfo(args, bucket.bucketLayout())); + + return value.getTags(); + } + @Override public OMMetadataManager getMetadataManager() { return metadataManager; @@ -710,7 +766,7 @@ public KeyDeletingService getDeletingService() { } @Override - public BackgroundService getDirDeletingService() { + public DirectoryDeletingService getDirDeletingService() { return dirDeletingService; } @@ -745,8 +801,7 @@ public boolean isSstFilteringSvcEnabled() { TimeUnit.MILLISECONDS); return serviceInterval != DISABLE_VALUE; } - - + @Override public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException { @@ -1347,7 +1402,6 @@ private OmKeyInfo createFakeDirIfShould(String volume, String bucket, return null; } - private OzoneFileStatus getOzoneFileStatusFSO(OmKeyArgs args, String clientAddress, boolean skipFileNotFoundError) throws IOException { final String volumeName = args.getVolumeName(); @@ -1685,7 +1739,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, cacheKeyMap.clear(); List keyInfoList = new ArrayList<>(fileStatusList.size()); - fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add); + fileStatusList.stream().map(OzoneFileStatus::getKeyInfo).forEach(keyInfoList::add); if (args.getLatestVersionLocation()) { slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0])); } @@ -1806,17 +1860,13 @@ private List buildFinalStatusList( } fileStatusFinalList.add(fileStatus); } - return sortPipelineInfo(fileStatusFinalList, keyInfoList, omKeyArgs, clientAddress); } - private List sortPipelineInfo( List fileStatusFinalList, List keyInfoList, OmKeyArgs omKeyArgs, String clientAddress) throws IOException { - - if (omKeyArgs.getLatestVersionLocation()) { slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0])); } @@ -1998,6 +2048,13 @@ public Table.KeyValue getPendingDeletionDir() return null; } + @Override + public TableIterator> getDeletedDirEntries( + String volume, String bucket) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, true); + return 
metadataManager.getDeletedDirTable().iterator(bucketPrefix.orElse("")); + } + @Override public List getPendingDeletionSubDirs(long volumeId, long bucketId, OmKeyInfo parentInfo, long numEntries) throws IOException { @@ -2005,7 +2062,7 @@ public List getPendingDeletionSubDirs(long volumeId, long bucketId, parentInfo.getObjectID(), ""); long countEntries = 0; - Table dirTable = metadataManager.getDirectoryTable(); + Table dirTable = metadataManager.getDirectoryTable(); try (TableIterator> iterator = dirTable.iterator()) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index 86d8352697a..36edda8941d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -43,7 +43,7 @@ /** * Common class to do listing of resources after merging - * rocksDB table cache & actual table. + * rocksDB table cache and actual table. */ public class ListIterator { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java index 54e81f8825d..1ba4f3d1d13 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java @@ -36,4 +36,10 @@ public interface OMMXBean extends ServiceRuntimeInfo { String getRocksDbDirectory(); + /** + * Gets the OM hostname. + * + * @return the OM hostname for the datanode. + */ + String getHostname(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 1c0ec78cfb2..de4241b7ac4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -61,7 +61,6 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numKeyDeletes; private @Metric MutableCounterLong numBucketLists; private @Metric MutableCounterLong numKeyLists; - private @Metric MutableCounterLong numTrashKeyLists; private @Metric MutableCounterLong numVolumeLists; private @Metric MutableCounterLong numKeyCommits; private @Metric MutableCounterLong numKeyHSyncs; @@ -104,6 +103,10 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSetTime; private @Metric MutableCounterLong numGetKeyInfo; + private @Metric MutableCounterLong numGetObjectTagging; + private @Metric MutableCounterLong numPutObjectTagging; + private @Metric MutableCounterLong numDeleteObjectTagging; + // Failure Metrics private @Metric MutableCounterLong numVolumeCreateFails; private @Metric MutableCounterLong numVolumeUpdateFails; @@ -120,7 +123,6 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numKeyDeleteFails; private @Metric MutableCounterLong numBucketListFails; private @Metric MutableCounterLong numKeyListFails; - private @Metric MutableCounterLong numTrashKeyListFails; private @Metric MutableCounterLong numVolumeListFails; private @Metric MutableCounterLong numKeyCommitFails; private @Metric MutableCounterLong numBlockAllocationFails; @@ -186,6 +188,10 @@ public 
class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numListOpenFilesFails; private @Metric MutableCounterLong getNumGetKeyInfoFails; + private @Metric MutableCounterLong numGetObjectTaggingFails; + private @Metric MutableCounterLong numPutObjectTaggingFails; + private @Metric MutableCounterLong numDeleteObjectTaggingFails; + private @Metric MutableCounterLong numRecoverLeaseFails; // Metrics for total amount of data written @@ -420,11 +426,6 @@ public void incNumKeyLists() { numKeyLists.incr(); } - public void incNumTrashKeyLists() { - numKeyOps.incr(); - numTrashKeyLists.incr(); - } - public void incNumVolumeLists() { numVolumeOps.incr(); numVolumeLists.incr(); @@ -836,10 +837,6 @@ public void incNumKeyListFails() { numKeyListFails.incr(); } - public void incNumTrashKeyListFails() { - numTrashKeyListFails.incr(); - } - public void incNumVolumeListFails() { numVolumeListFails.incr(); } @@ -924,6 +921,35 @@ public void incNumGetKeyInfoFails() { getNumGetKeyInfoFails.incr(); } + @Override + public void incNumGetObjectTagging() { + numGetObjectTagging.incr(); + numKeyOps.incr(); + } + + @Override + public void incNumGetObjectTaggingFails() { + numGetObjectTaggingFails.incr(); + } + + public void incNumPutObjectTagging() { + numPutObjectTagging.incr(); + numKeyOps.incr(); + } + + public void incNumPutObjectTaggingFails() { + numPutObjectTaggingFails.incr(); + } + + public void incNumDeleteObjectTagging() { + numDeleteObjectTagging.incr(); + numKeyOps.incr(); + } + + public void incNumDeleteObjectTaggingFails() { + numDeleteObjectTaggingFails.incr(); + } + @VisibleForTesting public long getNumVolumeCreates() { return numVolumeCreates.value(); @@ -994,11 +1020,6 @@ public long getNumKeyLists() { return numKeyLists.value(); } - @VisibleForTesting - public long getNumTrashKeyLists() { - return numTrashKeyLists.value(); - } - @VisibleForTesting public long getNumGetServiceLists() { return numGetServiceLists.value(); @@ -1099,11 +1120,6 @@ public long getNumKeyListFails() { return numKeyListFails.value(); } - @VisibleForTesting - public long getNumTrashKeyListFails() { - return numTrashKeyListFails.value(); - } - @VisibleForTesting public long getNumFSOps() { return numFSOps.value(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java index f68789b5394..2c66dd5035e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java @@ -129,7 +129,7 @@ boolean isTenantAdmin(UserGroupInformation callerUgi, String tenantId, boolean delegated); /** - * List all the user & accessIDs of all users that belong to this Tenant. + * List all the user and accessIDs of all users that belong to this Tenant. * Note this read is unprotected. 
See OzoneManager#listUserInTenant * @param tenantID * @return List of users diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java index 1d25a49fc56..a5954485bbd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java @@ -246,7 +246,6 @@ private void checkAcquiredAuthorizerWriteLock() throws OMException { * @param tenantId tenant name * @param userRoleName user role name * @param adminRoleName admin role name - * @return Tenant * @throws IOException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index a01855d1b63..fc1d9e0e96f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -144,6 +144,12 @@ public static void unregister() { @Metric(about = "readFromRockDb latency in listKeys") private MutableRate listKeysReadFromRocksDbLatencyNs; + @Metric(about = "resolveBucketLink latency in getObjectTagging") + private MutableRate getObjectTaggingResolveBucketLatencyNs; + + @Metric(about = "ACLs check in getObjectTagging") + private MutableRate getObjectTaggingAclCheckLatencyNs; + public void addLookupLatency(long latencyInNs) { lookupLatencyNs.add(latencyInNs); } @@ -248,7 +254,7 @@ public void setListKeysAveragePagination(long keyCount) { public void setListKeysOpsPerSec(float opsPerSec) { listKeysOpsPerSec.set(opsPerSec); } - + MutableRate getListKeysAclCheckLatencyNs() { return listKeysAclCheckLatencyNs; } @@ -280,4 +286,16 @@ public MutableRate getDeleteKeyResolveBucketAndAclCheckLatencyNs() { public void addListKeysReadFromRocksDbLatencyNs(long latencyInNs) { listKeysReadFromRocksDbLatencyNs.add(latencyInNs); } + + public MutableRate getGetObjectTaggingResolveBucketLatencyNs() { + return getObjectTaggingResolveBucketLatencyNs; + } + + public MutableRate getGetObjectTaggingAclCheckLatencyNs() { + return getObjectTaggingAclCheckLatencyNs; + } + + public void addGetObjectTaggingLatencyNs(long latencyInNs) { + getObjectTaggingAclCheckLatencyNs.add(latencyInNs); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 22d2b1e50b2..6698ece4a8d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -34,7 +34,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -109,6 +108,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_MAX_OPEN_FILES; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_MAX_OPEN_FILES_DEFAULT; +import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; @@ -140,7 +141,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager, *

        * OM DB Schema:
        *
    -   *
    +   * <pre>
    +   * {@code
        * Common Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family     |        VALUE                                    |
    @@ -161,7 +163,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | transactionInfoTable| #TRANSACTIONINFO -> OMTransactionInfo          |
        * |----------------------------------------------------------------------|
    -   *
    +   * }
    +   * </pre>
    +   * <pre>
    +   * {@code
        * Multi-Tenant Tables:
        * |----------------------------------------------------------------------|
        * | tenantStateTable          | tenantId -> OmDBTenantState              |
    @@ -170,8 +175,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | principalToAccessIdsTable | userPrincipal -> OmDBUserPrincipalInfo   |
        * |----------------------------------------------------------------------|
    -   *
    -   *
    +   * }
    +   * </pre>
    +   * <pre>
    +   * {@code
        * Simple Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family     |        VALUE                                    |
    @@ -182,7 +189,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
        * |----------------------------------------------------------------------|
    -   *
    +   * }
    +   * </pre>
    +   * <pre>
    +   * {@code
        * Prefix Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family   |        VALUE                                      |
    @@ -196,7 +206,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |  deletedDirTable | /volumeId/bucketId/parentId/dirName/objectId ->   |
        * |                  |                                      KeyInfo      |
        * |----------------------------------------------------------------------|
    -   *
    +   * }
    +   * </pre>
    +   * <pre>
    +   * {@code
        * Snapshot Tables:
        * |-------------------------------------------------------------------------|
        * |  Column Family        |        VALUE                                    |
    @@ -210,6 +223,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |-------------------------------------------------------------------------|
        * | compactionLogTable    | dbTrxId-compactionTime -> compactionLogEntry    |
        * |-------------------------------------------------------------------------|
    +   * }
    +   * </pre>
    */ public static final String USER_TABLE = "userTable"; @@ -388,8 +403,9 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name) throws IOException { lock = new OmReadOnlyLock(); omEpoch = 0; - setStore(loadDB(conf, dir, name, true, - java.util.Optional.of(Boolean.TRUE), Optional.empty())); + int maxOpenFiles = conf.getInt(OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES, OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT); + + setStore(loadDB(conf, dir, name, true, Optional.of(Boolean.TRUE), maxOpenFiles, false, false)); initializeOmTables(CacheType.PARTIAL_CACHE, false); perfMetrics = null; } @@ -422,8 +438,7 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name) checkSnapshotDirExist(checkpoint); } setStore(loadDB(conf, metaDir, dbName, false, - java.util.Optional.of(Boolean.TRUE), - Optional.of(maxOpenFiles), false, false)); + java.util.Optional.of(Boolean.TRUE), maxOpenFiles, false, false)); initializeOmTables(CacheType.PARTIAL_CACHE, false); } catch (IOException e) { stop(); @@ -565,7 +580,7 @@ public void start(OzoneConfiguration configuration) throws IOException { int maxOpenFiles = configuration.getInt(OZONE_OM_DB_MAX_OPEN_FILES, OZONE_OM_DB_MAX_OPEN_FILES_DEFAULT); - this.store = loadDB(configuration, metaDir, Optional.of(maxOpenFiles)); + this.store = loadDB(configuration, metaDir, maxOpenFiles); initializeOmTables(CacheType.FULL_CACHE, true); } @@ -573,33 +588,15 @@ public void start(OzoneConfiguration configuration) throws IOException { snapshotChainManager = new SnapshotChainManager(this); } - public static DBStore loadDB(OzoneConfiguration configuration, File metaDir) - throws IOException { - return loadDB(configuration, metaDir, Optional.empty()); - } - - public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, Optional maxOpenFiles) - throws IOException { - return loadDB(configuration, metaDir, OM_DB_NAME, false, - java.util.Optional.empty(), maxOpenFiles, true, true); - } - - public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, - String dbName, boolean readOnly, - java.util.Optional - disableAutoCompaction, - java.util.Optional maxOpenFiles) - throws IOException { - return loadDB(configuration, metaDir, dbName, readOnly, - disableAutoCompaction, maxOpenFiles, true, true); + public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, int maxOpenFiles) throws IOException { + return loadDB(configuration, metaDir, OM_DB_NAME, false, java.util.Optional.empty(), maxOpenFiles, true, true); } @SuppressWarnings("checkstyle:parameternumber") public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, String dbName, boolean readOnly, - java.util.Optional - disableAutoCompaction, - java.util.Optional maxOpenFiles, + java.util.Optional disableAutoCompaction, + int maxOpenFiles, boolean enableCompactionDag, boolean createCheckpointDirs) throws IOException { @@ -613,10 +610,10 @@ public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, .setPath(Paths.get(metaDir.getPath())) .setMaxFSSnapshots(maxFSSnapshots) .setEnableCompactionDag(enableCompactionDag) - .setCreateCheckpointDirs(createCheckpointDirs); + .setCreateCheckpointDirs(createCheckpointDirs) + .setMaxNumberOfOpenFiles(maxOpenFiles); disableAutoCompaction.ifPresent( dbStoreBuilder::disableDefaultCFAutoCompaction); - maxOpenFiles.ifPresent(dbStoreBuilder::setMaxNumberOfOpenFiles); return addOMTablesAndCodecs(dbStoreBuilder).build(); } @@ -824,7 +821,7 @@ public String getUserKey(String user) { /** * 
Given a volume and bucket, return the corresponding DB key. * - * @param volume - User name + * @param volume - Volume name * @param bucket - Bucket name */ @Override @@ -838,6 +835,22 @@ public String getBucketKey(String volume, String bucket) { return builder.toString(); } + /** + * {@inheritDoc} + */ + @Override + public String getBucketKeyPrefix(String volume, String bucket) { + return getOzoneKey(volume, bucket, OM_KEY_PREFIX); + } + + /** + * {@inheritDoc} + */ + @Override + public String getBucketKeyPrefixFSO(String volume, String bucket) throws IOException { + return getOzoneKeyFSO(volume, bucket, OM_KEY_PREFIX); + } + @Override public String getOzoneKey(String volume, String bucket, String key) { StringBuilder builder = new StringBuilder() @@ -1385,15 +1398,6 @@ public ListKeysResult listKeys(String volumeName, String bucketName, return new ListKeysResult(result, isTruncated); } - // TODO: HDDS-2419 - Complete stub below for core logic - @Override - public List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException { - - List deletedKeys = new ArrayList<>(); - return deletedKeys; - } - @Override public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName, String snapshotName) throws IOException { @@ -1470,18 +1474,6 @@ public ListSnapshotResponse listSnapshot( return new ListSnapshotResponse(snapshotInfos, lastSnapshot); } - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - - /* TODO: HDDS-2425 and HDDS-2426 - core logic stub would be added in later patch. - */ - - boolean recoverOperation = true; - return recoverOperation; - } - /** * @param userName volume owner, null for listing all volumes. */ @@ -1616,11 +1608,22 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, String[] keySplit = kv.getKey().split(OM_KEY_PREFIX); String bucketKey = getBucketKey(keySplit[1], keySplit[2]); OmBucketInfo bucketInfo = getBucketTable().get(bucketKey); - + // If Bucket deleted bucketInfo would be null, thus making previous snapshot also null. + SnapshotInfo previousSnapshotInfo = bucketInfo == null ? null : + SnapshotUtils.getLatestSnapshotInfo(bucketInfo.getVolumeName(), + bucketInfo.getBucketName(), ozoneManager, snapshotChainManager); + // previous snapshot is not active or it has not been flushed to disk then don't process the key in this + // iteration. + if (previousSnapshotInfo != null && + (previousSnapshotInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), + previousSnapshotInfo))) { + continue; + } // Get the latest snapshot in snapshot path. - try (ReferenceCounted - rcLatestSnapshot = getLatestActiveSnapshot( - keySplit[1], keySplit[2], omSnapshotManager)) { + try (ReferenceCounted rcLatestSnapshot = previousSnapshotInfo == null ? null : + omSnapshotManager.getSnapshot(previousSnapshotInfo.getVolumeName(), + previousSnapshotInfo.getBucketName(), previousSnapshotInfo.getName())) { // Multiple keys with the same path can be queued in one DB entry RepeatedOmKeyInfo infoList = kv.getValue(); @@ -1697,17 +1700,24 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, List notReclaimableKeyInfoList = notReclaimableKeyInfo.getOmKeyInfoList(); + // If Bucket deleted bucketInfo would be null, thus making previous snapshot also null. 
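Editor's note: the reclaim guard added just below only purges a deleted-key entry when the previous snapshot in the chain is the same before and after processing; otherwise the entry is left for a later run. A null-safe form of that comparison, sketched under the assumption that SnapshotInfo.getSnapshotId() returns a UUID; the helper name is illustrative, not part of the patch.

import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;

final class SnapshotChainGuardSketch {
  private SnapshotChainGuardSketch() { }

  /** True when both are null, or both refer to the same snapshot ID. */
  static boolean samePreviousSnapshot(SnapshotInfo before, SnapshotInfo after) {
    Optional<UUID> first = Optional.ofNullable(before).map(SnapshotInfo::getSnapshotId);
    Optional<UUID> second = Optional.ofNullable(after).map(SnapshotInfo::getSnapshotId);
    return Objects.equals(first, second);
  }
}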
+ SnapshotInfo newPreviousSnapshotInfo = bucketInfo == null ? null : + SnapshotUtils.getLatestSnapshotInfo(bucketInfo.getVolumeName(), + bucketInfo.getBucketName(), ozoneManager, snapshotChainManager); + // Check if the previous snapshot in the chain hasn't changed. + if (Objects.equals(Optional.ofNullable(newPreviousSnapshotInfo).map(SnapshotInfo::getSnapshotId), + Optional.ofNullable(previousSnapshotInfo).map(SnapshotInfo::getSnapshotId))) { + // If all the versions are not reclaimable, then do nothing. + if (notReclaimableKeyInfoList.size() > 0 && + notReclaimableKeyInfoList.size() != + infoList.getOmKeyInfoList().size()) { + keysToModify.put(kv.getKey(), notReclaimableKeyInfo); + } - // If all the versions are not reclaimable, then do nothing. - if (notReclaimableKeyInfoList.size() > 0 && - notReclaimableKeyInfoList.size() != - infoList.getOmKeyInfoList().size()) { - keysToModify.put(kv.getKey(), notReclaimableKeyInfo); - } - - if (notReclaimableKeyInfoList.size() != - infoList.getOmKeyInfoList().size()) { - keyBlocksList.addAll(blockGroupList); + if (notReclaimableKeyInfoList.size() != + infoList.getOmKeyInfoList().size()) { + keyBlocksList.addAll(blockGroupList); + } } } } @@ -1724,55 +1734,6 @@ private boolean versionExistsInPreviousSnapshot(OmKeyInfo omKeyInfo, delOmKeyInfo != null; } - /** - * Get the latest OmSnapshot for a snapshot path. - */ - public ReferenceCounted getLatestActiveSnapshot( - String volumeName, String bucketName, - OmSnapshotManager snapshotManager) - throws IOException { - - String snapshotPath = volumeName + OM_KEY_PREFIX + bucketName; - Optional latestPathSnapshot = Optional.ofNullable( - snapshotChainManager.getLatestPathSnapshotId(snapshotPath)); - - Optional snapshotInfo = Optional.empty(); - - while (latestPathSnapshot.isPresent()) { - Optional snapTableKey = latestPathSnapshot - .map(uuid -> snapshotChainManager.getTableKey(uuid)); - - snapshotInfo = snapTableKey.isPresent() ? - Optional.ofNullable(getSnapshotInfoTable().get(snapTableKey.get())) : - Optional.empty(); - - if (snapshotInfo.isPresent() && snapshotInfo.get().getSnapshotStatus() == - SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { - break; - } - - // Update latestPathSnapshot if current snapshot is deleted. - if (snapshotChainManager.hasPreviousPathSnapshot(snapshotPath, - latestPathSnapshot.get())) { - latestPathSnapshot = Optional.ofNullable(snapshotChainManager - .previousPathSnapshot(snapshotPath, latestPathSnapshot.get())); - } else { - latestPathSnapshot = Optional.empty(); - } - } - - Optional> rcOmSnapshot = - snapshotInfo.isPresent() ? - Optional.ofNullable( - snapshotManager.getSnapshot(volumeName, - bucketName, - snapshotInfo.get().getName()) - ) : - Optional.empty(); - - return rcOmSnapshot.orElse(null); - } - /** * Decide whether the open key is a multipart upload related key. * @param openKeyInfo open key related to multipart upload diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java index fdee1b71287..08f2115387e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java @@ -382,6 +382,7 @@ public ListKeysLightResult listKeysLight(String volumeName, * @param obj Ozone object. * @throws IOException if there is error. 
*/ + @Override public List getAcl(OzoneObj obj) throws IOException { String volumeName = obj.getVolumeName(); @@ -428,6 +429,45 @@ public List getAcl(OzoneObj obj) throws IOException { } } + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + long start = Time.monotonicNowNanos(); + + ResolvedBucket bucket = captureLatencyNs( + perfMetrics.getLookupResolveBucketLatencyNs(), + () -> ozoneManager.resolveBucketLink(args)); + + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + OmKeyArgs resolvedArgs = bucket.update(args); + + try { + if (isAclEnabled) { + captureLatencyNs(perfMetrics.getGetObjectTaggingAclCheckLatencyNs(), + () -> checkAcls(ResourceType.KEY, StoreType.OZONE, + ACLType.READ, bucket, + args.getKeyName()) + ); + } + metrics.incNumGetObjectTagging(); + return keyManager.getObjectTagging(resolvedArgs, bucket); + } catch (Exception ex) { + metrics.incNumGetObjectTaggingFails(); + auditSuccess = false; + audit.logReadFailure(buildAuditMessageForFailure(OMAction.GET_OBJECT_TAGGING, + auditMap, ex)); + throw ex; + } finally { + if (auditSuccess) { + audit.logReadSuccess(buildAuditMessageForSuccess(OMAction.GET_OBJECT_TAGGING, + auditMap)); + } + + perfMetrics.addGetObjectTaggingLatencyNs(Time.monotonicNowNanos() - start); + } + } + /** * Checks if current caller has acl permissions. * diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java index 21b2e8b990a..171242310a0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java @@ -46,4 +46,8 @@ public interface OmMetadataReaderMetrics { void incNumKeyListFails(); void incNumGetAcl(); + + void incNumGetObjectTagging(); + + void incNumGetObjectTaggingFails(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java index f863c086028..acb3a41e120 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java @@ -47,6 +47,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.stream.Collectors; @@ -185,6 +186,11 @@ public List getAcl(OzoneObj obj) throws IOException { return omMetadataReader.getAcl(normalizeOzoneObj(obj)); } + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + return omMetadataReader.getObjectTagging(normalizeOmKeyArgs(args)); + } + private OzoneObj normalizeOzoneObj(OzoneObj o) { if (o == null) { return null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 0d17851ed1f..f817625a979 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -38,9 +38,11 @@ import java.util.UUID; import com.google.common.cache.RemovalListener; +import org.apache.commons.lang3.tuple.Pair; import 
org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; @@ -97,6 +99,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DB_DIR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager.getSnapshotRootPath; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; @@ -349,7 +352,8 @@ public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { // If it happens, then either snapshot has been purged in between or SnapshotChain is corrupted // and missing some entries which needs investigation. if (snapshotTableKey == null) { - throw new IOException("No snapshot exist with snapshotId: " + snapshotId); + throw new OMException("Snapshot " + snapshotId + + " is not found in the snapshot chain.", FILE_NOT_FOUND); } final SnapshotInfo snapshotInfo = getSnapshotInfo(snapshotTableKey); @@ -622,7 +626,12 @@ public ReferenceCounted getActiveFsMetadataOrSnapshot( String[] keyParts = keyName.split(OM_KEY_PREFIX); if (isSnapshotKey(keyParts)) { String snapshotName = keyParts[1]; - + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. + ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, + bucketName), false, false); + volumeName = resolvedBucket.realVolume(); + bucketName = resolvedBucket.realBucket(); return (ReferenceCounted) (ReferenceCounted) getActiveSnapshot(volumeName, bucketName, snapshotName); } else { @@ -654,7 +663,6 @@ private ReferenceCounted getSnapshot( // don't allow snapshot indicator without snapshot name throw new OMException(INVALID_KEY_NAME); } - String snapshotTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); @@ -674,19 +682,73 @@ private ReferenceCounted getSnapshot(String snapshotTableKey, boolea } /** - * Returns true if the snapshot is in given status. - * @param key DB snapshot table key - * @param status SnapshotStatus - * @return true if the snapshot is in given status, false otherwise + * Checks if the last transaction performed on the snapshot has been flushed to disk. + * @param metadataManager Metadatamanager of Active OM. + * @param snapshotTableKey table key corresponding to snapshot in snapshotInfoTable. + * @return True if the changes have been flushed to DB otherwise false + * @throws IOException */ - public boolean isSnapshotStatus(String key, - SnapshotInfo.SnapshotStatus status) + public static boolean areSnapshotChangesFlushedToDB(OMMetadataManager metadataManager, String snapshotTableKey) throws IOException { - return getSnapshotInfo(key).getSnapshotStatus().equals(status); + // Need this info from cache since the snapshot could have been updated only on cache and not on disk. 
+ SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable().get(snapshotTableKey); + return areSnapshotChangesFlushedToDB(metadataManager, snapshotInfo); + } + + /** + * Checks if the last transaction performed on the snapshot has been flushed to disk. + * @param metadataManager Metadatamanager of Active OM. + * @param snapshotInfo SnapshotInfo value. + * @return True if the changes have been flushed to DB otherwise false. It would return true if the snapshot + * provided is null meaning the snapshot doesn't exist. + * @throws IOException + */ + public static boolean areSnapshotChangesFlushedToDB(OMMetadataManager metadataManager, SnapshotInfo snapshotInfo) + throws IOException { + if (snapshotInfo != null) { + TransactionInfo snapshotTransactionInfo = snapshotInfo.getLastTransactionInfo() != null ? + TransactionInfo.fromByteString(snapshotInfo.getLastTransactionInfo()) : null; + TransactionInfo omTransactionInfo = TransactionInfo.readTransactionInfo(metadataManager); + // If transactionInfo field is null then return true to keep things backward compatible. + return snapshotTransactionInfo == null || omTransactionInfo.compareTo(snapshotTransactionInfo) >= 0; + } + return true; + } + + + /** + * Returns OmSnapshot object and skips active check. + * This should only be used for API calls initiated by background service e.g. purgeKeys, purgeSnapshot, + * snapshotMoveDeletedKeys, and SetSnapshotProperty. + */ + public ReferenceCounted getSnapshot(UUID snapshotId) throws IOException { + return snapshotCache.get(snapshotId); } - public SnapshotInfo getSnapshotInfo(String key) throws IOException { - return SnapshotUtils.getSnapshotInfo(ozoneManager, key); + /** + * Returns snapshotInfo from cache if it is present in cache, otherwise it checks RocksDB and return value from there. + * ################################################# + * NOTE: THIS SHOULD BE USED BY SNAPSHOT CACHE ONLY. + * ################################################# + * Sometimes, the follower OM node may be lagging that it gets purgeKeys or snapshotMoveDeletedKeys from a Snapshot, + * and purgeSnapshot for the same Snapshot one after another. And purgeSnapshot's validateAndUpdateCache gets + * executed before doubleBuffer flushes purgeKeys or snapshotMoveDeletedKeys from that Snapshot. + * This should not be a case on the leader node because SnapshotDeletingService checks that deletedTable and + * deletedDirectoryTable in DB don't have entries for the bucket before it sends a purgeSnapshot on a snapshot. + * If that happens, and we just look into the cache, the addToBatch operation will fail when it tries to open + * the DB and purgeKeys from the Snapshot because snapshot is already purged from the SnapshotInfoTable cache. + * Hence, it is needed to look into the table to make sure that snapshot exists somewhere either in cache or in DB. 
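Editor's note: areSnapshotChangesFlushedToDB, added above, lets background work skip a snapshot whose latest transaction is still only in the double buffer. A minimal guard sketch; the wrapper class and method name are illustrative, not part of the patch.

import java.io.IOException;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;

final class SnapshotFlushGuardSketch {
  private SnapshotFlushGuardSketch() { }

  /** Returns true when it is safe to act on the given snapshot's DB state. */
  static boolean safeToProcess(OMMetadataManager metadataManager,
      SnapshotInfo snapshotInfo) throws IOException {
    // Also returns true when snapshotInfo is null, matching the helper's
    // backward-compatible behaviour described in its javadoc.
    return OmSnapshotManager.areSnapshotChangesFlushedToDB(metadataManager, snapshotInfo);
  }
}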
+ */ + private SnapshotInfo getSnapshotInfo(String snapshotKey) throws IOException { + SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager().getSnapshotInfoTable().get(snapshotKey); + + if (snapshotInfo == null) { + snapshotInfo = ozoneManager.getMetadataManager().getSnapshotInfoTable().getSkipCache(snapshotKey); + } + if (snapshotInfo == null) { + throw new OMException("Snapshot '" + snapshotKey + "' is not found.", FILE_NOT_FOUND); + } + return snapshotInfo; } public static String getSnapshotPrefix(String snapshotName) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java index 7560d453eb9..d00b12e94ce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java @@ -162,5 +162,22 @@ public void incNumGetAcl() { MutableCounterLong numKeyOps; private @Metric MutableCounterLong numFSOps; + + + private @Metric + MutableCounterLong numGetObjectTagging; + private @Metric + MutableCounterLong numGetObjectTaggingFails; + + @Override + public void incNumGetObjectTagging() { + numGetObjectTagging.incr(); + numKeyOps.incr(); + } + + @Override + public void incNumGetObjectTaggingFails() { + numGetObjectTaggingFails.incr(); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java index c09c5b91af5..cad987bb7da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java @@ -19,21 +19,11 @@ import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collection; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS; - /** * Utility class for ozone configurations. */ @@ -43,38 +33,6 @@ public final class OzoneConfigUtil { private OzoneConfigUtil() { } - /** - * Return list of s3 administrators prop from config. - * - * If ozone.s3.administrators value is empty string or unset, - * defaults to ozone.administrators value. 
- */ - static Collection getS3AdminsFromConfig(OzoneConfiguration conf) - throws IOException { - Collection ozAdmins = - conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS); - if (ozAdmins == null || ozAdmins.isEmpty()) { - ozAdmins = conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS); - } - String omSPN = UserGroupInformation.getCurrentUser().getShortUserName(); - if (!ozAdmins.contains(omSPN)) { - ozAdmins.add(omSPN); - } - return ozAdmins; - } - - static Collection getS3AdminsGroupsFromConfig( - OzoneConfiguration conf) { - Collection s3AdminsGroup = - conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS_GROUPS); - if (s3AdminsGroup.isEmpty() && conf - .getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS).isEmpty()) { - s3AdminsGroup = conf - .getTrimmedStringCollection(OZONE_ADMINISTRATORS_GROUPS); - } - return s3AdminsGroup; - } - public static ReplicationConfig resolveReplicationConfigPreference( HddsProtos.ReplicationType clientType, HddsProtos.ReplicationFactor clientFactor, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index a514262cae2..2ccc16cc285 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -87,6 +87,8 @@ import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.security.symmetric.DefaultSecretKeyClient; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; @@ -106,6 +108,7 @@ import org.apache.hadoop.ozone.om.s3.S3SecretCacheProvider; import org.apache.hadoop.ozone.om.s3.S3SecretStoreProvider; import org.apache.hadoop.ozone.om.service.OMRangerBGSyncService; +import org.apache.hadoop.ozone.om.service.QuotaRepairTask; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; @@ -119,8 +122,6 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.security.SecurityConfig; -import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; -import org.apache.hadoop.hdds.security.symmetric.DefaultSecretKeySignerClient; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; @@ -172,7 +173,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; @@ -274,7 +274,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR; import static 
org.apache.hadoop.ozone.OzoneConsts.PREPARE_MARKER_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; @@ -371,7 +371,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private OzoneDelegationTokenSecretManager delegationTokenMgr; private OzoneBlockTokenSecretManager blockTokenMgr; private CertificateClient certClient; - private SecretKeySignerClient secretKeyClient; + private SecretKeyClient secretKeyClient; private ScmTopologyClient scmTopologyClient; private final Text omRpcAddressTxt; private OzoneConfiguration configuration; @@ -437,7 +437,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private List ratisReporterList = null; private KeyProviderCryptoExtension kmsProvider; - private OzoneFsServerDefaults serverDefaults; private final OMLayoutVersionManager versionManager; private final ReplicationConfigValidator replicationConfigValidator; @@ -476,6 +475,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private boolean fsSnapshotEnabled; + private String omHostName; + /** * OM Startup mode. */ @@ -655,14 +656,6 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) kmsProvider = null; LOG.error("Fail to create Key Provider"); } - Configuration hadoopConfig = - LegacyHadoopConfigurationSource.asHadoopConfiguration(configuration); - URI keyProviderUri = KMSUtil.getKeyProviderUri( - hadoopConfig, - CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH); - String keyProviderUriStr = - (keyProviderUri != null) ? keyProviderUri.toString() : null; - serverDefaults = new OzoneFsServerDefaults(keyProviderUriStr); if (secConfig.isSecurityEnabled()) { omComponent = OM_DAEMON + "-" + omId; HddsProtos.OzoneManagerDetailsProto omInfo = @@ -680,8 +673,8 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) SecretKeyProtocol secretKeyProtocol = HddsServerUtil.getSecretKeyClientForOm(conf); - secretKeyClient = new DefaultSecretKeySignerClient(secretKeyProtocol, - omNodeDetails.threadNamePrefix()); + secretKeyClient = DefaultSecretKeyClient.create( + conf, secretKeyProtocol, omNodeDetails.threadNamePrefix()); } serviceInfo = new ServiceInfoProvider(secConfig, this, certClient, testSecureOmFlag); @@ -704,11 +697,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) // Get read only admin list readOnlyAdmins = OzoneAdmins.getReadonlyAdmins(conf); - Collection s3AdminUsernames = - OzoneConfigUtil.getS3AdminsFromConfig(configuration); - Collection s3AdminGroups = - OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration); - s3OzoneAdmins = new OzoneAdmins(s3AdminUsernames, s3AdminGroups); + s3OzoneAdmins = OzoneAdmins.getS3Admins(conf); instantiateServices(false); // Create special volume s3v which is required for S3G. 
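// Illustrative sketch (not part of this patch): the constructor now calls OzoneAdmins.getS3Admins(conf) instead of
// the removed OzoneConfigUtil helpers. The fallback semantics the deleted code implemented, and which the new
// helper is expected to preserve, roughly amount to the following (method name and body are an illustration
// based on the removed code, not the actual OzoneAdmins implementation):
static Collection<String> resolveS3Admins(OzoneConfiguration conf) throws IOException {
  // ozone.s3.administrators falls back to ozone.administrators when unset or empty.
  Collection<String> admins = conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS);
  if (admins.isEmpty()) {
    admins = conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS);
  }
  // The OM login user is always treated as an S3 admin.
  String omUser = UserGroupInformation.getCurrentUser().getShortUserName();
  if (!admins.contains(omUser)) {
    admins.add(omUser);
  }
  return admins;
}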
@@ -746,6 +735,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) } bucketUtilizationMetrics = BucketUtilizationMetrics.create(metadataManager); + omHostName = HddsUtils.getHostName(conf); } public boolean isStopped() { @@ -791,8 +781,9 @@ private void setInstanceVariablesFromConf() { * * @param conf OzoneConfiguration * @return OM instance - * @throws IOException, AuthenticationException in case OM instance - * creation fails. + * @throws IOException AuthenticationException in case OM instance + * creation fails, + * @throws AuthenticationException */ public static OzoneManager createOm(OzoneConfiguration conf) throws IOException, AuthenticationException { @@ -874,7 +865,13 @@ private void instantiateServices(boolean withNewSnapshot) throws IOException { prefixManager = new PrefixManagerImpl(this, metadataManager, isRatisEnabled); keyManager = new KeyManagerImpl(this, scmClient, configuration, perfMetrics); - accessAuthorizer = OzoneAuthorizerFactory.forOM(this); + // If authorizer is not initialized or the authorizer is Native + // re-initialize the authorizer, else for non-native authorizer + // like ranger we can reuse previous value if it is initialized + if (null == accessAuthorizer || accessAuthorizer.isNative()) { + accessAuthorizer = OzoneAuthorizerFactory.forOM(this); + } + omMetadataReader = new OmMetadataReader(keyManager, prefixManager, this, LOG, AUDIT, metrics, accessAuthorizer); // Active DB's OmMetadataReader instance does not need to be reference @@ -1091,6 +1088,7 @@ private OzoneDelegationTokenSecretManager createDelegationTokenSecretManager( .setOzoneManager(this) .setS3SecretManager(s3SecretManager) .setCertificateClient(certClient) + .setSecretKeyClient(secretKeyClient) .setOmServiceId(omNodeDetails.getServiceId()) .build(); } @@ -1133,7 +1131,7 @@ public void startSecretManager() { throw new UncheckedIOException(e); } - if (secConfig.isBlockTokenEnabled() && blockTokenMgr != null) { + if (secConfig.isSecurityEnabled()) { LOG.info("Starting secret key client."); try { secretKeyClient.start(configuration); @@ -1186,10 +1184,14 @@ public NetworkTopology getClusterMap() { * without fully setting up a working secure cluster. */ @VisibleForTesting - public void setSecretKeyClient( - SecretKeySignerClient secretKeyClient) { + public void setSecretKeyClient(SecretKeyClient secretKeyClient) { this.secretKeyClient = secretKeyClient; - blockTokenMgr.setSecretKeyClient(secretKeyClient); + if (blockTokenMgr != null) { + blockTokenMgr.setSecretKeyClient(secretKeyClient); + } + if (delegationTokenMgr != null) { + delegationTokenMgr.setSecretKeyClient(secretKeyClient); + } } /** @@ -1496,7 +1498,7 @@ private void initializeRatisDirs(OzoneConfiguration conf) throws IOException { // snapshot directory in Ratis storage directory. if yes, move it to // new snapshot directory. 
- File snapshotDir = new File(omRatisDirectory, OM_RATIS_SNAPSHOT_DIR); + File snapshotDir = new File(omRatisDirectory, OZONE_RATIS_SNAPSHOT_DIR); if (snapshotDir.isDirectory()) { FileUtils.moveDirectory(snapshotDir.toPath(), @@ -2065,6 +2067,7 @@ private void addOMNodeToPeers(String newOMNodeId) throws IOException { } catch (IOException e) { LOG.error("{}: Couldn't add OM {} to peer list.", getOMNodeId(), newOMNodeId); + return; } if (omRatisSnapshotProvider == null) { @@ -2334,6 +2337,10 @@ public boolean stop() { if (bucketUtilizationMetrics != null) { bucketUtilizationMetrics.unRegister(); } + + if (versionManager != null) { + versionManager.close(); + } return true; } catch (Exception e) { LOG.error("OzoneManager stop failed.", e); @@ -2969,39 +2976,6 @@ public ListKeysLightResult listKeysLight(String volumeName, return new ListKeysLightResult(basicKeysList, listKeysResult.isTruncated()); } - @Override - public List listTrash(String volumeName, - String bucketName, String startKeyName, String keyPrefix, int maxKeys) - throws IOException { - boolean auditSuccess = true; - Map auditMap = buildAuditMap(volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); - auditMap.put(OzoneConsts.START_KEY, startKeyName); - auditMap.put(OzoneConsts.KEY_PREFIX, keyPrefix); - auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys)); - try { - if (isAclEnabled) { - omMetadataReader.checkAcls(ResourceType.BUCKET, - StoreType.OZONE, ACLType.LIST, - volumeName, bucketName, keyPrefix); - } - metrics.incNumTrashKeyLists(); - return keyManager.listTrash(volumeName, bucketName, - startKeyName, keyPrefix, maxKeys); - } catch (IOException ex) { - metrics.incNumTrashKeyListFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_TRASH, - auditMap, ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_TRASH, - auditMap)); - } - } - } - @Override public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName, String snapshotName) throws IOException { @@ -3009,12 +2983,13 @@ public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName, Map auditMap = buildAuditMap(volumeName); auditMap.put(OzoneConsts.BUCKET, bucketName); try { - if (isAclEnabled) { - omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, - ACLType.READ, volumeName, bucketName, null); - } + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. + ResolvedBucket resolvedBucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + auditMap = buildAuditMap(resolvedBucket.realVolume()); + auditMap.put(OzoneConsts.BUCKET, resolvedBucket.realBucket()); SnapshotInfo snapshotInfo = - metadataManager.getSnapshotInfo(volumeName, bucketName, snapshotName); + metadataManager.getSnapshotInfo(resolvedBucket.realVolume(), resolvedBucket.realBucket(), snapshotName); AUDIT.logReadSuccess(buildAuditMessageForSuccess( OMAction.SNAPSHOT_INFO, auditMap)); @@ -3035,12 +3010,17 @@ public ListSnapshotResponse listSnapshot( Map auditMap = buildAuditMap(volumeName); auditMap.put(OzoneConsts.BUCKET, bucketName); try { + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
+ ResolvedBucket resolvedBucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + auditMap = buildAuditMap(resolvedBucket.realVolume()); + auditMap.put(OzoneConsts.BUCKET, resolvedBucket.realBucket()); if (isAclEnabled) { omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, - ACLType.LIST, volumeName, bucketName, null); + ACLType.LIST, resolvedBucket.realVolume(), resolvedBucket.realBucket(), null); } ListSnapshotResponse listSnapshotResponse = - metadataManager.listSnapshot(volumeName, bucketName, + metadataManager.listSnapshot(resolvedBucket.realVolume(), resolvedBucket.realBucket(), snapshotPrefix, prevSnapshot, maxListResult); AUDIT.logReadSuccess(buildAuditMessageForSuccess( @@ -3095,6 +3075,11 @@ private void unregisterMXBean() { } } + @Override + public String getNamespace() { + return omNodeDetails.getServiceId(); + } + @Override public String getRpcPort() { return "" + omRpcAddress.getPort(); @@ -3154,6 +3139,11 @@ public String getRocksDbDirectory() { return String.valueOf(OMStorage.getOmDbDir(configuration)); } + @Override + public String getHostname() { + return omHostName; + } + @VisibleForTesting public OzoneManagerHttpServer getHttpServer() { return httpServer; @@ -3171,6 +3161,15 @@ public List getServiceList() throws IOException { .setType(ServicePort.Type.RPC) .setValue(omRpcAddress.getPort()) .build()); + Configuration hadoopConfig = + LegacyHadoopConfigurationSource.asHadoopConfiguration(configuration); + URI keyProviderUri = KMSUtil.getKeyProviderUri( + hadoopConfig, + CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH); + String keyProviderUriStr = + (keyProviderUri != null) ? keyProviderUri.toString() : null; + omServiceInfoBuilder.setServerDefaults( + new OzoneFsServerDefaults(keyProviderUriStr)); if (httpServer != null && httpServer.getHttpAddress() != null) { omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() @@ -4370,7 +4369,7 @@ private void checkAdminUserPrivilege(String operation) throws IOException { } public boolean isS3Admin(UserGroupInformation callerUgi) { - return callerUgi != null && s3OzoneAdmins.isAdmin(callerUgi); + return OzoneAdmins.isS3Admin(callerUgi, s3OzoneAdmins); } @VisibleForTesting @@ -4420,10 +4419,16 @@ public ResolvedBucket resolveBucketLink(Pair requested, } public ResolvedBucket resolveBucketLink(Pair requested, - boolean allowDanglingBuckets) + boolean allowDanglingBuckets) throws IOException { + return resolveBucketLink(requested, allowDanglingBuckets, isAclEnabled); + } + + public ResolvedBucket resolveBucketLink(Pair requested, + boolean allowDanglingBuckets, + boolean aclEnabled) throws IOException { OmBucketInfo resolved; - if (isAclEnabled) { + if (aclEnabled) { UserGroupInformation ugi = getRemoteUser(); if (getS3Auth() != null) { ugi = UserGroupInformation.createRemoteUser( @@ -4434,15 +4439,26 @@ public ResolvedBucket resolveBucketLink(Pair requested, ugi, remoteIp != null ? remoteIp : omRpcAddress.getAddress(), remoteIp != null ? 
remoteIp.getHostName() : - omRpcAddress.getHostName(), allowDanglingBuckets); + omRpcAddress.getHostName(), allowDanglingBuckets, aclEnabled); } else { resolved = resolveBucketLink(requested, new HashSet<>(), - null, null, null, allowDanglingBuckets); + null, null, null, allowDanglingBuckets, aclEnabled); } return new ResolvedBucket(requested.getLeft(), requested.getRight(), resolved); } + private OmBucketInfo resolveBucketLink( + Pair volumeAndBucket, + Set> visited, + UserGroupInformation userGroupInformation, + InetAddress remoteAddress, + String hostName, + boolean allowDanglingBuckets) throws IOException { + return resolveBucketLink(volumeAndBucket, visited, userGroupInformation, remoteAddress, hostName, + allowDanglingBuckets, isAclEnabled); + } + /** * Resolves bucket symlinks. Read permission is required for following links. * @@ -4460,7 +4476,8 @@ private OmBucketInfo resolveBucketLink( UserGroupInformation userGroupInformation, InetAddress remoteAddress, String hostName, - boolean allowDanglingBuckets) throws IOException { + boolean allowDanglingBuckets, + boolean aclEnabled) throws IOException { String volumeName = volumeAndBucket.getLeft(); String bucketName = volumeAndBucket.getRight(); @@ -4483,7 +4500,7 @@ private OmBucketInfo resolveBucketLink( DETECTED_LOOP_IN_BUCKET_LINKS); } - if (isAclEnabled) { + if (aclEnabled) { final ACLType type = ACLType.READ; checkAcls(ResourceType.BUCKET, StoreType.OZONE, type, volumeName, bucketName, null, userGroupInformation, @@ -4494,7 +4511,7 @@ private OmBucketInfo resolveBucketLink( return resolveBucketLink( Pair.of(info.getSourceVolume(), info.getSourceBucket()), visited, userGroupInformation, remoteAddress, hostName, - allowDanglingBuckets); + allowDanglingBuckets, aclEnabled); } @VisibleForTesting @@ -4781,10 +4798,26 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) } @Override - public OzoneFsServerDefaults getServerDefaults() { - return serverDefaults; + public String getQuotaRepairStatus() throws IOException { + checkAdminUserPrivilege("quota repair status"); + return QuotaRepairTask.getStatus(); } + @Override + public void startQuotaRepair(List buckets) throws IOException { + checkAdminUserPrivilege("start quota repair"); + new QuotaRepairTask(this).repair(buckets); + } + + @Override + public Map getObjectTagging(final OmKeyArgs args) + throws IOException { + try (ReferenceCounted rcReader = getReader(args)) { + return rcReader.get().getObjectTagging(args); + } + } + + /** * Write down Layout version of a finalized feature to DB on finalization. * @param lvm OMLayoutVersionManager @@ -4915,13 +4948,11 @@ public SnapshotDiffResponse snapshotDiff(String volume, boolean forceFullDiff, boolean disableNativeDiff) throws IOException { - - if (isAclEnabled) { - omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume, bucket, null); - } - - return omSnapshotManager.getSnapshotDiffReport(volume, bucket, fromSnapshot, toSnapshot, - token, pageSize, forceFullDiff, disableNativeDiff); + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
+ ResolvedBucket resolvedBucket = resolveBucketLink(Pair.of(volume, bucket), false); + return omSnapshotManager.getSnapshotDiffReport(resolvedBucket.realVolume(), resolvedBucket.realBucket(), + fromSnapshot, toSnapshot, token, pageSize, forceFullDiff, disableNativeDiff); } public CancelSnapshotDiffResponse cancelSnapshotDiff(String volume, @@ -4929,12 +4960,9 @@ public CancelSnapshotDiffResponse cancelSnapshotDiff(String volume, String fromSnapshot, String toSnapshot) throws IOException { - - if (isAclEnabled) { - omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume, bucket, null); - } - - return omSnapshotManager.cancelSnapshotDiff(volume, bucket, fromSnapshot, toSnapshot); + ResolvedBucket resolvedBucket = this.resolveBucketLink(Pair.of(volume, bucket), false); + return omSnapshotManager.cancelSnapshotDiff(resolvedBucket.realVolume(), resolvedBucket.realBucket(), + fromSnapshot, toSnapshot); } public List listSnapshotDiffJobs(String volume, @@ -4942,12 +4970,13 @@ public List listSnapshotDiffJobs(String volume, String jobStatus, boolean listAll) throws IOException { - + ResolvedBucket resolvedBucket = this.resolveBucketLink(Pair.of(volume, bucket), false); if (isAclEnabled) { omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST, volume, bucket, null); } - return omSnapshotManager.getSnapshotDiffList(volume, bucket, jobStatus, listAll); + return omSnapshotManager.getSnapshotDiffList(resolvedBucket.realVolume(), resolvedBucket.realBucket(), + jobStatus, listAll); } public String printCompactionLogDag(String fileNamePrefix, @@ -5042,4 +5071,11 @@ public void awaitDoubleBufferFlush() throws InterruptedException { getOmServerProtocol().awaitDoubleBufferFlush(); } } + + public void checkFeatureEnabled(OzoneManagerVersion feature) throws OMException { + String disabledFeatures = configuration.get(OMConfigKeys.OZONE_OM_FEATURES_DISABLED, ""); + if (disabledFeatures.contains(feature.name())) { + throw new OMException("Feature disabled: " + feature, OMException.ResultCodes.NOT_SUPPORTED_OPERATION); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java index 5a4ff643157..c693e529580 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java @@ -51,6 +51,8 @@ private OzoneManagerUtils() { * OzoneManagerStateMachine#runCommand function and ensures sequential * execution path. * Below is the call trace to perform OM client request operation: + *
    +   * {@code
        * OzoneManagerStateMachine#applyTransaction ->
        * OzoneManagerStateMachine#runCommand ->
        * OzoneManagerRequestHandler#handleWriteRequest ->
    @@ -60,6 +62,8 @@ private OzoneManagerUtils() {
        * OzoneManagerUtils#getBucketLayout ->
        * OzoneManagerUtils#getOmBucketInfo ->
        * omMetadataManager().getBucketTable().get(buckKey)
    +   * }
    +   * </pre>
    */ public static OmBucketInfo getBucketInfo(OMMetadataManager metaMgr, @@ -164,12 +168,8 @@ private static OmBucketInfo resolveBucketInfoLink( * buck-src has the actual BucketLayout that will be used by the * links. */ - try { - return resolveBucketInfoLink(metadataManager, - buckInfo.getSourceVolume(), buckInfo.getSourceBucket(), visited); - } catch (IOException e) { - throw e; - } + return resolveBucketInfoLink(metadataManager, buckInfo.getSourceVolume(), + buckInfo.getSourceBucket(), visited); } return buckInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java index 60353590e75..e4102665d62 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java @@ -24,8 +24,10 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.NoSuchElementException; @@ -56,6 +58,7 @@ public class SnapshotChainManager { private final ConcurrentMap snapshotIdToTableKey; private UUID latestGlobalSnapshotId; private final boolean snapshotChainCorrupted; + private UUID oldestGlobalSnapshotId; public SnapshotChainManager(OMMetadataManager metadataManager) { globalSnapshotChain = Collections.synchronizedMap(new LinkedHashMap<>()); @@ -104,6 +107,8 @@ private void addSnapshotGlobal(UUID snapshotID, UUID prevGlobalID) // On add snapshot, set previous snapshot entry nextSnapshotID = // snapshotID globalSnapshotChain.get(prevGlobalID).setNextSnapshotId(snapshotID); + } else { + oldestGlobalSnapshotId = snapshotID; } globalSnapshotChain.put(snapshotID, @@ -171,7 +176,6 @@ private boolean deleteSnapshotGlobal(UUID snapshotID) throws IOException { // for node removal UUID next = globalSnapshotChain.get(snapshotID).getNextSnapshotId(); UUID prev = globalSnapshotChain.get(snapshotID).getPreviousSnapshotId(); - if (prev != null && !globalSnapshotChain.containsKey(prev)) { throw new IOException(String.format( "Global snapshot chain corruption. " + @@ -197,6 +201,9 @@ private boolean deleteSnapshotGlobal(UUID snapshotID) throws IOException { if (latestGlobalSnapshotId.equals(snapshotID)) { latestGlobalSnapshotId = prev; } + if (snapshotID.equals(oldestGlobalSnapshotId)) { + oldestGlobalSnapshotId = next; + } return true; } else { // snapshotID not found in snapshot chain, log warning and return @@ -362,13 +369,16 @@ public synchronized void updateSnapshot(SnapshotInfo snapshotInfo) { public synchronized boolean deleteSnapshot(SnapshotInfo snapshotInfo) throws IOException { validateSnapshotChain(); - boolean status = deleteSnapshotGlobal(snapshotInfo.getSnapshotId()) && - deleteSnapshotPath(snapshotInfo.getSnapshotPath(), - snapshotInfo.getSnapshotId()); - if (status) { - snapshotIdToTableKey.remove(snapshotInfo.getSnapshotId()); - } - return status; + return deleteSnapshotGlobal(snapshotInfo.getSnapshotId()) && + deleteSnapshotPath(snapshotInfo.getSnapshotPath(), snapshotInfo.getSnapshotId()); + } + + /** + * Remove the snapshot from snapshotIdToSnapshotTableKey map. 
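// Illustrative usage sketch (not part of this patch) for the chain traversal added below: iterator(false) walks
// the global snapshot chain from the oldest snapshot to the latest, iterator(true) walks it newest-first.
// Variable names are illustrative only; assumes java.util imports.
Iterator<UUID> chain = snapshotChainManager.iterator(false);
List<UUID> snapshotIdsInCreationOrder = new ArrayList<>();
while (chain.hasNext()) {
  snapshotIdsInCreationOrder.add(chain.next());
}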
+ */ + public synchronized void removeFromSnapshotIdToTable(UUID snapshotId) throws IOException { + validateSnapshotChain(); + snapshotIdToTableKey.remove(snapshotId); } /** @@ -379,6 +389,42 @@ public UUID getLatestGlobalSnapshotId() throws IOException { return latestGlobalSnapshotId; } + /** + * Get oldest of global snapshot in snapshot chain. + */ + public UUID getOldestGlobalSnapshotId() throws IOException { + validateSnapshotChain(); + return oldestGlobalSnapshotId; + } + + public Iterator iterator(final boolean reverse) throws IOException { + validateSnapshotChain(); + return new Iterator() { + private UUID currentSnapshotId = reverse ? getLatestGlobalSnapshotId() : getOldestGlobalSnapshotId(); + @Override + public boolean hasNext() { + return currentSnapshotId != null; + } + + @Override + public UUID next() { + try { + UUID prevSnapshotId = currentSnapshotId; + if (reverse && hasPreviousGlobalSnapshot(currentSnapshotId) || + !reverse && hasNextGlobalSnapshot(currentSnapshotId)) { + currentSnapshotId = + reverse ? previousGlobalSnapshot(currentSnapshotId) : nextGlobalSnapshot(currentSnapshotId); + } else { + currentSnapshotId = null; + } + return prevSnapshotId; + } catch (IOException e) { + throw new UncheckedIOException("Error while getting next snapshot for " + currentSnapshotId, e); + } + } + }; + } + /** * Get latest path snapshot in snapshot chain. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java index 6e1c9da34cb..bd462224e9d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.om; import com.google.common.base.Preconditions; -import com.google.protobuf.RpcController; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -35,15 +34,12 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,8 +65,6 @@ */ public class TrashOzoneFileSystem extends FileSystem { - private static final RpcController NULL_RPC_CONTROLLER = null; - private static final int OZONE_FS_ITERATE_BATCH_SIZE = 100; private static final int OZONE_MAX_LIST_KEYS_SIZE = 10000; @@ -97,34 +91,15 @@ public TrashOzoneFileSystem(OzoneManager ozoneManager) throws IOException { ozoneConfiguration = OzoneConfiguration.of(getConf()); } - private RaftClientRequest getRatisRequest( - OzoneManagerProtocolProtos.OMRequest omRequest) { - return RaftClientRequest.newBuilder() - .setClientId(CLIENT_ID) - .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) - 
.setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) - .setCallId(runCount.getAndIncrement()) - .setMessage( - Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - } - private void submitRequest(OzoneManagerProtocolProtos.OMRequest omRequest) throws Exception { ozoneManager.getMetrics().incNumTrashWriteRequests(); if (ozoneManager.isRatisEnabled()) { - OMClientRequest omClientRequest = - OzoneManagerRatisUtils.createClientRequest(omRequest, ozoneManager); + // perform preExecute as ratis submit does not perform preExecute + OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, ozoneManager); omRequest = omClientRequest.preExecute(ozoneManager); - RaftClientRequest req = getRatisRequest(omRequest); - ozoneManager.getOmRatisServer().submitRequest(omRequest, req); - } else { - ozoneManager.getOmServerProtocol(). - submitRequest(NULL_RPC_CONTROLLER, omRequest); } + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, CLIENT_ID, runCount.getAndIncrement()); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java index 9064d5d454c..2aa8114e278 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java @@ -33,22 +33,14 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.TrashPolicyDefault; import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.InvalidPathException; +import org.apache.hadoop.fs.ozone.OzoneTrashPolicy; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.conf.OMClientConfig; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT; @@ -57,45 +49,26 @@ * of TrashPolicy ozone-specific trash optimizations are/will be made such as * having a multithreaded TrashEmptier. */ -public class TrashPolicyOzone extends TrashPolicyDefault { +public class TrashPolicyOzone extends OzoneTrashPolicy { private static final Logger LOG = LoggerFactory.getLogger(TrashPolicyOzone.class); - private static final Path CURRENT = new Path("Current"); - - private static final FsPermission PERMISSION = - new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE); - private static final DateFormat CHECKPOINT = new SimpleDateFormat( "yyMMddHHmmss"); /** Format of checkpoint directories used prior to Hadoop 0.23.
*/ private static final DateFormat OLD_CHECKPOINT = new SimpleDateFormat("yyMMddHHmm"); - private static final int MSECS_PER_MINUTE = 60 * 1000; - private long emptierInterval; - private Configuration configuration; - private OzoneManager om; - private OzoneConfiguration ozoneConfiguration; - public TrashPolicyOzone() { } @Override public void initialize(Configuration conf, FileSystem fs) { - this.fs = fs; - this.configuration = conf; - float hadoopTrashInterval = conf.getFloat( - FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); - // check whether user has configured ozone specific trash-interval - // if not fall back to hadoop configuration - this.deletionInterval = (long)(conf.getFloat( - OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, hadoopTrashInterval) - * MSECS_PER_MINUTE); + super.initialize(conf, fs); float hadoopCheckpointInterval = conf.getFloat( FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT); @@ -112,7 +85,6 @@ public void initialize(Configuration conf, FileSystem fs) { + "Changing to default value 0", deletionInterval); this.deletionInterval = 0; } - ozoneConfiguration = OzoneConfiguration.of(this.configuration); } TrashPolicyOzone(FileSystem fs, Configuration conf, OzoneManager om) { @@ -122,142 +94,10 @@ public void initialize(Configuration conf, FileSystem fs) { @Override public Runnable getEmptier() throws IOException { - return new TrashPolicyOzone.Emptier((OzoneConfiguration) configuration, + return new TrashPolicyOzone.Emptier(getOzoneConfiguration(), emptierInterval, om.getThreadNamePrefix()); } - @Override - public boolean moveToTrash(Path path) throws IOException { - if (validatePath(path)) { - if (!isEnabled()) { - return false; - } - - if (!path.isAbsolute()) { // make path absolute - path = new Path(fs.getWorkingDirectory(), path); - } - - // check that path exists - fs.getFileStatus(path); - String qpath = fs.makeQualified(path).toString(); - - Path trashRoot = fs.getTrashRoot(path); - Path trashCurrent = new Path(trashRoot, CURRENT); - if (qpath.startsWith(trashRoot.toString())) { - return false; // already in trash - } - - if (trashRoot.getParent().toString().startsWith(qpath)) { - throw new IOException("Cannot move \"" + path - + "\" to the trash, as it contains the trash"); - } - - Path trashPath; - Path baseTrashPath; - if (fs.getUri().getScheme().equals(OzoneConsts.OZONE_OFS_URI_SCHEME)) { - OFSPath ofsPath = new OFSPath(path, ozoneConfiguration); - // trimming volume and bucket in order to be compatible with o3fs - // Also including volume and bucket name in the path is redundant as - // the key is already in a particular volume and bucket. 
- Path trimmedVolumeAndBucket = - new Path(OzoneConsts.OZONE_URI_DELIMITER - + ofsPath.getKeyName()); - trashPath = makeTrashRelativePath(trashCurrent, trimmedVolumeAndBucket); - baseTrashPath = makeTrashRelativePath(trashCurrent, - trimmedVolumeAndBucket.getParent()); - } else { - trashPath = makeTrashRelativePath(trashCurrent, path); - baseTrashPath = makeTrashRelativePath(trashCurrent, path.getParent()); - } - - IOException cause = null; - - // try twice, in case checkpoint between the mkdirs() & rename() - for (int i = 0; i < 2; i++) { - try { - if (!fs.mkdirs(baseTrashPath, PERMISSION)) { // create current - LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath); - return false; - } - } catch (FileAlreadyExistsException e) { - // find the path which is not a directory, and modify baseTrashPath - // & trashPath, then mkdirs - Path existsFilePath = baseTrashPath; - while (!fs.exists(existsFilePath)) { - existsFilePath = existsFilePath.getParent(); - } - baseTrashPath = new Path(baseTrashPath.toString() - .replace(existsFilePath.toString(), - existsFilePath.toString() + Time.now())); - trashPath = new Path(baseTrashPath, trashPath.getName()); - // retry, ignore current failure - --i; - continue; - } catch (IOException e) { - LOG.warn("Can't create trash directory: " + baseTrashPath, e); - cause = e; - break; - } - try { - // if the target path in Trash already exists, then append with - // a current time in millisecs. - String orig = trashPath.toString(); - - while (fs.exists(trashPath)) { - trashPath = new Path(orig + Time.now()); - } - - // move to current trash - boolean renamed = fs.rename(path, trashPath); - if (!renamed) { - LOG.error("Failed to move to trash: {}", path); - throw new IOException("Failed to move to trash: " + path); - } - LOG.info("Moved: '" + path + "' to trash at: " + trashPath); - return true; - } catch (IOException e) { - cause = e; - } - } - throw (IOException) new IOException("Failed to move to trash: " + path) - .initCause(cause); - } - return false; - } - - private boolean validatePath(Path path) throws IOException { - String key = path.toUri().getPath(); - // Check to see if bucket is path item to be deleted. 
- // Cannot moveToTrash if bucket is deleted, - // return error for this condition - OFSPath ofsPath = new OFSPath(key.substring(1), ozoneConfiguration); - if (path.isRoot() || ofsPath.isBucket()) { - throw new IOException("Recursive rm of bucket " - + path.toString() + " not permitted"); - } - - Path trashRoot = this.fs.getTrashRoot(path); - - LOG.debug("Key path to moveToTrash: {}", key); - String trashRootKey = trashRoot.toUri().getPath(); - LOG.debug("TrashrootKey for moveToTrash: {}", trashRootKey); - - if (!OzoneFSUtils.isValidName(key)) { - throw new InvalidPathException("Invalid path Name " + key); - } - // first condition tests when length key is <= length trash - // and second when length key > length trash - if ((key.contains(this.fs.TRASH_PREFIX)) && (trashRootKey.startsWith(key)) - || key.startsWith(trashRootKey)) { - return false; - } - return true; - } - - private Path makeTrashRelativePath(Path basePath, Path rmFilePath) { - return Path.mergePaths(basePath, rmFilePath); - } - protected class Emptier implements Runnable { private Configuration conf; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java index de567447ae3..be57a7b7451 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java @@ -50,145 +50,115 @@ /** * Class defines the structure and types of the om.db. */ -public class OMDBDefinition extends DBDefinition.WithMap { +public final class OMDBDefinition extends DBDefinition.WithMap { public static final DBColumnFamilyDefinition DELETED_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DELETED_TABLE, - String.class, StringCodec.get(), - RepeatedOmKeyInfo.class, RepeatedOmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition USER_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.USER_TABLE, - String.class, StringCodec.get(), - PersistedUserVolumeInfo.class, Proto2Codec.get(PersistedUserVolumeInfo.getDefaultInstance())); public static final DBColumnFamilyDefinition VOLUME_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.VOLUME_TABLE, - String.class, StringCodec.get(), - OmVolumeArgs.class, OmVolumeArgs.getCodec()); public static final DBColumnFamilyDefinition OPEN_KEY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.OPEN_KEY_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition KEY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.KEY_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition BUCKET_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.BUCKET_TABLE, - String.class, StringCodec.get(), - OmBucketInfo.class, OmBucketInfo.getCodec()); public static final DBColumnFamilyDefinition MULTIPART_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.MULTIPARTINFO_TABLE, - String.class, StringCodec.get(), - OmMultipartKeyInfo.class, OmMultipartKeyInfo.getCodec()); public static final DBColumnFamilyDefinition PREFIX_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.PREFIX_TABLE, - String.class, StringCodec.get(), - OmPrefixInfo.class, OmPrefixInfo.getCodec()); public static final DBColumnFamilyDefinition 
DTOKEN_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DELEGATION_TOKEN_TABLE, - OzoneTokenIdentifier.class, TokenIdentifierCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition S3_SECRET_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.S3_SECRET_TABLE, - String.class, StringCodec.get(), - S3SecretValue.class, S3SecretValue.getCodec()); public static final DBColumnFamilyDefinition TRANSACTION_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TRANSACTION_INFO_TABLE, - String.class, StringCodec.get(), - TransactionInfo.class, TransactionInfo.getCodec()); public static final DBColumnFamilyDefinition DIRECTORY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DIRECTORY_TABLE, - String.class, StringCodec.get(), - OmDirectoryInfo.class, OmDirectoryInfo.getCodec()); public static final DBColumnFamilyDefinition FILE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.FILE_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition OPEN_FILE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.OPEN_FILE_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition DELETED_DIR_TABLE = new DBColumnFamilyDefinition<>(OmMetadataManagerImpl.DELETED_DIR_TABLE, - String.class, StringCodec.get(), OmKeyInfo.class, + StringCodec.get(), OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition META_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.META_TABLE, - String.class, StringCodec.get(), - String.class, StringCodec.get()); // Tables for multi-tenancy @@ -197,27 +167,26 @@ public class OMDBDefinition extends DBDefinition.WithMap { TENANT_ACCESS_ID_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TENANT_ACCESS_ID_TABLE, - String.class, // accessId + // accessId StringCodec.get(), - OmDBAccessIdInfo.class, // tenantId, secret, principal + // tenantId, secret, principal OmDBAccessIdInfo.getCodec()); public static final DBColumnFamilyDefinition PRINCIPAL_TO_ACCESS_IDS_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.PRINCIPAL_TO_ACCESS_IDS_TABLE, - String.class, // User principal + // User principal StringCodec.get(), - OmDBUserPrincipalInfo.class, // List of accessIds + // List of accessIds OmDBUserPrincipalInfo.getCodec()); public static final DBColumnFamilyDefinition TENANT_STATE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TENANT_STATE_TABLE, - String.class, // tenantId (tenant name) + // tenantId (tenant name) StringCodec.get(), - OmDBTenantState.class, OmDBTenantState.getCodec()); // End tables for S3 multi-tenancy @@ -226,18 +195,15 @@ public class OMDBDefinition extends DBDefinition.WithMap { SNAPSHOT_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, - String.class, // snapshot path + // snapshot path StringCodec.get(), - SnapshotInfo.class, SnapshotInfo.getCodec()); public static final DBColumnFamilyDefinition COMPACTION_LOG_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.COMPACTION_LOG_TABLE, - String.class, StringCodec.get(), - CompactionLogEntry.class, CompactionLogEntry.getCodec()); /** @@ -254,9 +220,9 @@ public class OMDBDefinition extends DBDefinition.WithMap { SNAPSHOT_RENAMED_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.SNAPSHOT_RENAMED_TABLE, - String.class, // /volumeName/bucketName/objectID + // 
/volumeName/bucketName/objectID StringCodec.get(), - String.class, // path to key in prev snapshot's key(file)/dir Table. + // path to key in prev snapshot's key(file)/dir Table. StringCodec.get()); private static final Map> @@ -284,7 +250,13 @@ public class OMDBDefinition extends DBDefinition.WithMap { USER_TABLE, VOLUME_TABLE); - public OMDBDefinition() { + private static final OMDBDefinition INSTANCE = new OMDBDefinition(); + + public static OMDBDefinition get() { + return INSTANCE; + } + + private OMDBDefinition() { super(COLUMN_FAMILIES); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java index 18ee42756ef..491f2dadbf8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java @@ -91,6 +91,9 @@ private static void init() { CMD_AUDIT_ACTION_MAP.put(Type.Prepare, OMAction.UPGRADE_PREPARE); CMD_AUDIT_ACTION_MAP.put(Type.CancelPrepare, OMAction.UPGRADE_CANCEL); CMD_AUDIT_ACTION_MAP.put(Type.FinalizeUpgrade, OMAction.UPGRADE_FINALIZE); + CMD_AUDIT_ACTION_MAP.put(Type.GetObjectTagging, OMAction.GET_OBJECT_TAGGING); + CMD_AUDIT_ACTION_MAP.put(Type.PutObjectTagging, OMAction.PUT_OBJECT_TAGGING); + CMD_AUDIT_ACTION_MAP.put(Type.DeleteObjectTagging, OMAction.DELETE_OBJECT_TAGGING); } private static OMAction getAction(OzoneManagerProtocolProtos.OMRequest request) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java index 1dcb0f0cd61..2d59c6259ad 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java @@ -43,7 +43,7 @@ public interface AuthorizerLock { /** * @return stamp that can be passed to - * {@link this#validateOptimisticRead(long)} to check if a write lock was + * {@link #validateOptimisticRead(long)} to check if a write lock was * acquired since the stamp was issued. * @throws IOException If an ongoing write prevents the lock from moving to * the read state for longer than the timeout. 
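// Illustrative sketch (not part of this patch): the stamp-then-validate protocol described in the AuthorizerLock
// javadoc above mirrors java.util.concurrent.locks.StampedLock. Generic JDK usage of that pattern, not
// AuthorizerLock's actual implementation:
class OptimisticReadExample {
  private final java.util.concurrent.locks.StampedLock lock = new java.util.concurrent.locks.StampedLock();
  private String policyState = "v1";

  String readPolicyState() {
    long stamp = lock.tryOptimisticRead();   // issue a stamp without blocking
    String seen = policyState;               // optimistic read
    if (!lock.validate(stamp)) {             // a write lock was acquired since the stamp was issued
      stamp = lock.readLock();               // fall back to a pessimistic read lock
      try {
        seen = policyState;
      } finally {
        lock.unlockRead(stamp);
      }
    }
    return seen;
  }

  void updatePolicyState(String next) {
    long stamp = lock.writeLock();
    try {
      policyState = next;                    // invalidates any outstanding optimistic stamps
    } finally {
      lock.unlockWrite(stamp);
    }
  }
}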
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerClientMultiTenantAccessController.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerClientMultiTenantAccessController.java index 4aae4d9a77e..31892199bf8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerClientMultiTenantAccessController.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerClientMultiTenantAccessController.java @@ -130,9 +130,15 @@ public RangerClientMultiTenantAccessController(OzoneConfiguration conf) LOG.info("authType = {}, login user = {}", authType, usernameOrPrincipal); - client = new RangerClient(rangerHttpsAddress, - authType, usernameOrPrincipal, passwordOrKeytab, - rangerServiceName, OzoneConsts.OZONE); + UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + try { + client = new RangerClient(rangerHttpsAddress, + authType, usernameOrPrincipal, passwordOrKeytab, + rangerServiceName, OzoneConsts.OZONE); + } finally { + // set back the expected login user + UserGroupInformation.setLoginUser(loginUser); + } // Whether or not the Ranger credentials are valid is unknown right after // RangerClient initialization here. Because RangerClient does not perform diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index a6fcc40dda1..42ae90b9181 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -40,12 +40,12 @@ import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.codec.OMDBDefinition; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; @@ -216,8 +216,8 @@ private OzoneManagerDoubleBuffer(Builder b) { } public OzoneManagerDoubleBuffer start() { - daemon.start(); isRunning.set(true); + daemon.start(); return this; } @@ -426,8 +426,12 @@ private String addToBatch(Queue buffer, BatchOperation batchOperation) { * in RocksDB callback flush. If multiple operations are flushed in one * specific batch, we are not sure at the flush of which specific operation * the callback is coming. - * There could be a possibility of race condition that is exposed to rocksDB - * behaviour for the batch. + * PurgeSnapshot is also considered a barrier, since purgeSnapshot transaction on a standalone basis is an + * idempotent operation. Once the directory gets deleted the previous transactions that have been performed on the + * snapshotted rocksdb would start failing on replay since those transactions have not been committed but the + * directory could have been partially deleted/ fully deleted. 
This could also lead to inconsistencies in the DB + * reads from the purged rocksdb if operations are not performed consciously. + * There could be a possibility of race condition that is exposed to rocksDB behaviour for the batch. * Hence, we treat createSnapshot as separate batch flush. *
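// Illustrative sketch (not part of this patch): the batching rule in this javadoc (a new sub-batch starts at the
// first element, at any barrier command, and immediately after a barrier; see the e.g. further below) generalizes
// to a split-at-barriers routine. Simplified stand-in types, not the OM's Entry/OMResponse classes; assumes
// java.util imports.
static <T> List<List<T>> splitAtBarriers(Iterable<T> ready, java.util.function.Predicate<T> isBarrier) {
  List<List<T>> batches = new ArrayList<>();
  T previous = null;
  for (T item : ready) {
    if (batches.isEmpty() || isBarrier.test(item) || (previous != null && isBarrier.test(previous))) {
      batches.add(new LinkedList<>());
    }
    batches.get(batches.size() - 1).add(item);
    previous = item;
  }
  return batches;
}
// e.g. splitAtBarriers(readyBuffer, r -> isStandaloneBatchCmdTypes(r.getResponse().getOMResponse()))
// would yield [[request1, request2], [snapshotRequest1], [request3], [snapshotRequest2], [request4]].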

    * e.g. requestBuffer = [request1, request2, snapshotRequest1, @@ -435,19 +439,17 @@ private String addToBatch(Queue buffer, BatchOperation batchOperation) { * response = [[request1, request2], [snapshotRequest1], [request3], * [snapshotRequest2], [request4]] */ - private List> splitReadyBufferAtCreateSnapshot() { + private synchronized List> splitReadyBufferAtCreateSnapshot() { final List> response = new ArrayList<>(); - OMResponse previousOmResponse = null; for (final Entry entry : readyBuffer) { OMResponse omResponse = entry.getResponse().getOMResponse(); // New queue gets created in three conditions: // 1. It is first element in the response, - // 2. Current request is createSnapshot request. - // 3. Previous request was createSnapshot request. - if (response.isEmpty() || omResponse.hasCreateSnapshotResponse() - || (previousOmResponse != null && - previousOmResponse.hasCreateSnapshotResponse())) { + // 2. Current request is createSnapshot/purgeSnapshot request. + // 3. Previous request was createSnapshot/purgeSnapshot request. + if (response.isEmpty() || isStandaloneBatchCmdTypes(omResponse) + || isStandaloneBatchCmdTypes(previousOmResponse)) { response.add(new LinkedList<>()); } @@ -458,6 +460,15 @@ private List> splitReadyBufferAtCreateSnapshot() { return response; } + private static boolean isStandaloneBatchCmdTypes(OMResponse response) { + if (response == null) { + return false; + } + final OzoneManagerProtocolProtos.Type type = response.getCmdType(); + return type == OzoneManagerProtocolProtos.Type.SnapshotPurge + || type == OzoneManagerProtocolProtos.Type.CreateSnapshot; + } + private void addCleanupEntry(Entry entry, Map> cleanupEpochs) { Class responseClass = entry.getResponse().getClass(); @@ -466,10 +477,7 @@ private void addCleanupEntry(Entry entry, Map> cleanupEpochs) if (cleanupTableInfo != null) { final List cleanupTables; if (cleanupTableInfo.cleanupAll()) { - cleanupTables = new OMDBDefinition().getColumnFamilies() - .stream() - .map(DBColumnFamilyDefinition::getName) - .collect(Collectors.toList()); + cleanupTables = OMDBDefinition.get().getColumnFamilyNames(); } else { cleanupTables = Arrays.asList(cleanupTableInfo.cleanupTables()); } @@ -612,7 +620,7 @@ int getCurrentBufferSize() { return currentBuffer.size(); } - int getReadyBufferSize() { + synchronized int getReadyBufferSize() { return readyBuffer.size(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 78d6ed89d2d..9f187dd0219 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -71,6 +71,7 @@ import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.netty.NettyConfigKeys; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.ClientInvocationId; import org.apache.ratis.protocol.SetConfigurationRequest; import org.apache.ratis.protocol.exceptions.LeaderNotReadyException; import org.apache.ratis.protocol.exceptions.LeaderSteppingDownException; @@ -87,6 +88,7 @@ import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.RaftServerConfigKeys; +import org.apache.ratis.server.RetryCache; import org.apache.ratis.server.protocol.TermIndex; import 
org.apache.ratis.server.storage.RaftStorage; import org.apache.ratis.util.LifeCycle; @@ -301,15 +303,23 @@ private RaftClientRequest createRaftRequest(OMRequest omRequest) { } /** - * API used internally from OzoneManager Server when requests needs to be - * submitted to ratis, where the crafted RaftClientRequest is passed along. + * API used internally from OzoneManager Server when requests need to be submitted. * @param omRequest - * @param raftClientRequest + * @param cliId + * @param callId * @return OMResponse * @throws ServiceException */ - public OMResponse submitRequest(OMRequest omRequest, - RaftClientRequest raftClientRequest) throws ServiceException { + public OMResponse submitRequest(OMRequest omRequest, ClientId cliId, long callId) throws ServiceException { + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(cliId) + .setServerId(getRaftPeerId()) + .setGroupId(getRaftGroupId()) + .setCallId(callId) + .setMessage(Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); RaftClientReply raftClientReply = submitRequestToRatis(raftClientRequest); return createOmResponse(omRequest, raftClientReply); @@ -452,16 +462,11 @@ public void removeRaftPeer(OMNodeDetails omNodeDetails) { * ratis server. */ private RaftClientRequest createRaftRequestImpl(OMRequest omRequest) { - if (!ozoneManager.isTestSecureOmFlag()) { - Preconditions.checkArgument(Server.getClientId() != DUMMY_CLIENT_ID); - Preconditions.checkArgument(Server.getCallId() != INVALID_CALL_ID); - } return RaftClientRequest.newBuilder() - .setClientId( - ClientId.valueOf(UUID.nameUUIDFromBytes(Server.getClientId()))) + .setClientId(getClientId()) .setServerId(server.getId()) .setGroupId(raftGroupId) - .setCallId(Server.getCallId()) + .setCallId(getCallId()) .setMessage( Message.valueOf( OMRatisHelper.convertRequestToByteString(omRequest))) @@ -469,6 +474,39 @@ private RaftClientRequest createRaftRequestImpl(OMRequest omRequest) { .build(); } + private ClientId getClientId() { + final byte[] clientIdBytes = Server.getClientId(); + if (!ozoneManager.isTestSecureOmFlag()) { + Preconditions.checkArgument(clientIdBytes != DUMMY_CLIENT_ID); + } + return ClientId.valueOf(UUID.nameUUIDFromBytes(clientIdBytes)); + } + + private long getCallId() { + final long callId = Server.getCallId(); + if (!ozoneManager.isTestSecureOmFlag()) { + Preconditions.checkArgument(callId != INVALID_CALL_ID); + } + return callId; + } + + public OMResponse checkRetryCache() throws ServiceException { + final ClientInvocationId invocationId = ClientInvocationId.valueOf(getClientId(), getCallId()); + final RetryCache.Entry cacheEntry = getServerDivision().getRetryCache().getIfPresent(invocationId); + if (cacheEntry == null) { + return null; //cache miss + } + //cache hit + try { + return getOMResponse(cacheEntry.getReplyFuture().get()); + } catch (ExecutionException ex) { + throw new ServiceException(ex.getMessage(), ex); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new ServiceException(ex.getMessage(), ex); + } + } + /** * Process the raftClientReply and return OMResponse. 
* @param omRequest @@ -530,6 +568,10 @@ private OMResponse createOmResponseImpl(OMRequest omRequest, } } + return getOMResponse(reply); + } + + private OMResponse getOMResponse(RaftClientReply reply) throws ServiceException { try { return OMRatisHelper.getOMResponseFromRaftClientReply(reply); } catch (IOException ex) { @@ -539,9 +581,6 @@ private OMResponse createOmResponseImpl(OMRequest omRequest, throw new ServiceException(ex); } } - - // TODO: Still need to handle RaftRetry failure exception and - // NotReplicated exception. } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 463afba9421..6a5274ca01f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.protocolPB.OzoneManagerRequestHandler; import org.apache.hadoop.ozone.protocolPB.RequestHandler; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; @@ -88,7 +89,6 @@ public class OzoneManagerStateMachine extends BaseStateMachine { new SimpleStateMachineStorage(); private final OzoneManager ozoneManager; private RequestHandler handler; - private RaftGroupId raftGroupId; private volatile OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private final ExecutorService executorService; private final ExecutorService installSnapshotExecutor; @@ -134,8 +134,8 @@ public void initialize(RaftServer server, RaftGroupId id, RaftStorage raftStorage) throws IOException { getLifeCycle().startAndTransition(() -> { super.initialize(server, id, raftStorage); - this.raftGroupId = id; storage.init(raftStorage); + LOG.info("{}: initialize {} with {}", getId(), id, getLastAppliedTermIndex()); }); } @@ -143,8 +143,9 @@ public void initialize(RaftServer server, RaftGroupId id, public synchronized void reinitialize() throws IOException { loadSnapshotInfoFromDB(); if (getLifeCycleState() == LifeCycle.State.PAUSED) { - unpause(getLastAppliedTermIndex().getIndex(), - getLastAppliedTermIndex().getTerm()); + final TermIndex lastApplied = getLastAppliedTermIndex(); + unpause(lastApplied.getIndex(), lastApplied.getTerm()); + LOG.info("{}: reinitialize {} with {}", getId(), getGroupId(), lastApplied); } } @@ -160,6 +161,7 @@ public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, RaftPeerId newLeaderId) { // Initialize OMHAMetrics ozoneManager.omHAMetricsInit(newLeaderId.toString()); + LOG.info("{}: leader changed to {}", groupMemberId, newLeaderId); } /** Notified by Ratis for non-StateMachine term-index update. */ @@ -213,8 +215,15 @@ public void notifyConfigurationChanged(long term, long index, RaftProtos.RaftConfigurationProto newRaftConfiguration) { List newPeers = newRaftConfiguration.getPeersList(); - LOG.info("Received Configuration change notification from Ratis. 
New Peer" + - " list:\n{}", newPeers); + final StringBuilder logBuilder = new StringBuilder(1024) + .append("notifyConfigurationChanged from Ratis: term=").append(term) + .append(", index=").append(index) + .append(", New Peer list: "); + newPeers.forEach(peer -> logBuilder.append(peer.getId().toStringUtf8()) + .append("(") + .append(peer.getAddress()) + .append("), ")); + LOG.info(logBuilder.substring(0, logBuilder.length() - 2)); List newPeerIds = new ArrayList<>(); for (RaftProtos.RaftPeerProto raftPeerProto : newPeers) { @@ -263,7 +272,7 @@ public TransactionContext startTransaction( messageContent); Preconditions.checkArgument(raftClientRequest.getRaftGroupId().equals( - raftGroupId)); + getGroupId())); try { handler.validateRequest(omRequest); } catch (IOException ioe) { @@ -293,6 +302,10 @@ public TransactionContext preAppendTransaction(TransactionContext trx) OzoneManagerPrepareState prepareState = ozoneManager.getPrepareState(); + if (LOG.isDebugEnabled()) { + LOG.debug("{}: preAppendTransaction {}", getId(), TermIndex.valueOf(trx.getLogEntry())); + } + if (cmdType == OzoneManagerProtocolProtos.Type.Prepare) { // Must authenticate prepare requests here, since we must determine // whether or not to apply the prepare gate before proceeding with the @@ -303,8 +316,7 @@ public TransactionContext preAppendTransaction(TransactionContext trx) if (ozoneManager.getAclsEnabled() && !ozoneManager.isAdmin(userGroupInformation)) { String message = "Access denied for user " + userGroupInformation - + ". " - + "Superuser privilege is required to prepare ozone managers."; + + ". Superuser privilege is required to prepare upgrade/downgrade."; OMException cause = new OMException(message, OMException.ResultCodes.ACCESS_DENIED); // Leader should not step down because of this failure. @@ -341,6 +353,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { : OMRatisHelper.convertByteStringToOMRequest( trx.getStateMachineLogEntry().getLogData()); final TermIndex termIndex = TermIndex.valueOf(trx.getLogEntry()); + LOG.debug("{}: applyTransaction {}", getId(), termIndex); // In the current approach we have one single global thread executor. // with single thread. Right now this is being done for correctness, as // applyTransaction will be run on multiple OM's we want to execute the @@ -427,12 +440,14 @@ public synchronized void pause() { */ public synchronized void unpause(long newLastAppliedSnaphsotIndex, long newLastAppliedSnapShotTermIndex) { - LOG.info("OzoneManagerStateMachine is un-pausing"); if (statePausedCount.decrementAndGet() == 0) { getLifeCycle().startAndTransition(() -> { this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis(); this.setLastAppliedTermIndex(TermIndex.valueOf( newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex)); + LOG.info("{}: OzoneManagerStateMachine un-pause completed. " + + "newLastAppliedSnaphsotIndex: {}, newLastAppliedSnapShotTermIndex: {}", + getId(), newLastAppliedSnaphsotIndex, newLastAppliedSnapShotTermIndex); }); } } @@ -482,15 +497,15 @@ private synchronized long takeSnapshotImpl() throws IOException { final TermIndex applied = getLastAppliedTermIndex(); final TermIndex notified = getLastNotifiedTermIndex(); final TermIndex snapshot = applied.compareTo(notified) > 0 ? 
applied : notified; - LOG.info(" applied = {}", applied); - LOG.info(" skipped = {}", lastSkippedIndex); - LOG.info("notified = {}", notified); - LOG.info("snapshot = {}", snapshot); + long startTime = Time.monotonicNow(); final TransactionInfo transactionInfo = TransactionInfo.valueOf(snapshot); ozoneManager.setTransactionInfo(transactionInfo); ozoneManager.getMetadataManager().getTransactionInfoTable().put(TRANSACTION_INFO_KEY, transactionInfo); ozoneManager.getMetadataManager().getStore().flushDB(); + LOG.info("{}: taking snapshot. applied = {}, skipped = {}, " + + "notified = {}, current snapshot index = {}, took {} ms", + getId(), applied, lastSkippedIndex, notified, snapshot, Time.monotonicNow() - startTime); return snapshot.getIndex(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 5dc640c742c..dc634248c28 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -19,6 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import java.io.File; import java.nio.file.InvalidPathException; @@ -77,6 +78,7 @@ import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotDeleteRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveDeletedKeysRequest; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveTableKeysRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotPurgeRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotRenameRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest; @@ -98,6 +100,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.grpc.GrpcTlsConfig; +import org.apache.ratis.protocol.ClientId; import org.rocksdb.RocksDBException; import java.io.IOException; @@ -107,7 +110,7 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout; @@ -117,6 +120,7 @@ public final class OzoneManagerRatisUtils { private static final Logger LOG = LoggerFactory .getLogger(OzoneManagerRatisUtils.class); + private static final RpcController NULL_RPC_CONTROLLER = null; private OzoneManagerRatisUtils() { } @@ -229,6 +233,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new OMSnapshotRenameRequest(omRequest); case SnapshotMoveDeletedKeys: return new OMSnapshotMoveDeletedKeysRequest(omRequest); + case SnapshotMoveTableKeys: + return new OMSnapshotMoveTableKeysRequest(omRequest); case SnapshotPurge: return new OMSnapshotPurgeRequest(omRequest); case SetSnapshotProperty: @@ -334,6 +340,16 @@ public static OMClientRequest 
createClientRequest(OMRequest omRequest, return new S3ExpiredMultipartUploadsAbortRequest(omRequest); case QuotaRepair: return new OMQuotaRepairRequest(omRequest); + case PutObjectTagging: + keyArgs = omRequest.getPutObjectTaggingRequest().getKeyArgs(); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + break; + case DeleteObjectTagging: + keyArgs = omRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + break; default: throw new OMException("Unrecognized write command type request " + cmdType, OMException.ResultCodes.INVALID_REQUEST); @@ -401,9 +417,9 @@ private static OMClientRequest getOMAclRequest(OMRequest omRequest, } /** - * Convert exception result to {@link OzoneManagerProtocolProtos.Status}. + * Convert exception result to {@link org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status}. * @param exception - * @return OzoneManagerProtocolProtos.Status + * @return Status */ public static Status exceptionToResponseStatus(Exception exception) { if (exception instanceof OMException) { @@ -433,8 +449,7 @@ public static Status exceptionToResponseStatus(Exception exception) { */ public static TransactionInfo getTrxnInfoFromCheckpoint( OzoneConfiguration conf, Path dbPath) throws Exception { - return HAUtils - .getTrxnInfoFromCheckpoint(conf, dbPath, new OMDBDefinition()); + return HAUtils.getTrxnInfoFromCheckpoint(conf, dbPath, OMDBDefinition.get()); } /** @@ -479,7 +494,7 @@ public static String getOMRatisSnapshotDirectory(ConfigurationSource conf) { OZONE_OM_RATIS_SNAPSHOT_DIR, OZONE_METADATA_DIRS); File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); snapshotDir = Paths.get(metaDirPath.getPath(), - OM_RATIS_SNAPSHOT_DIR).toString(); + OZONE_RATIS_SNAPSHOT_DIR).toString(); } return snapshotDir; } @@ -502,4 +517,13 @@ public static GrpcTlsConfig createServerTlsConfig(SecurityConfig conf, return null; } + + public static OzoneManagerProtocolProtos.OMResponse submitRequest( + OzoneManager om, OMRequest omRequest, ClientId clientId, long callId) throws ServiceException { + if (om.isRatisEnabled()) { + return om.getOmRatisServer().submitRequest(omRequest, clientId, callId); + } else { + return om.getOmServerProtocol().submitRequest(NULL_RPC_CONTROLLER, omRequest); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java index 4a5558ed7f1..5d542bfb912 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java @@ -49,6 +49,10 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestWithFSO; import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest; import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequestWithFSO; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3DeleteObjectTaggingRequestWithFSO; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3PutObjectTaggingRequest; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3PutObjectTaggingRequestWithFSO; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import jakarta.annotation.Nonnull; @@ -191,6 +195,23 @@ public final class BucketLayoutAwareOMKeyRequestFactory { addRequestClass(Type.SetTimes, OMKeySetTimesRequestWithFSO.class, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // PutObjectTagging + addRequestClass(Type.PutObjectTagging, + S3PutObjectTaggingRequest.class, + BucketLayout.OBJECT_STORE); + addRequestClass(Type.PutObjectTagging, + S3PutObjectTaggingRequestWithFSO.class, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // DeleteObjectTagging + addRequestClass(Type.DeleteObjectTagging, + S3DeleteObjectTaggingRequest.class, + BucketLayout.OBJECT_STORE); + addRequestClass(Type.DeleteObjectTagging, + S3DeleteObjectTaggingRequestWithFSO.class, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + } private BucketLayoutAwareOMKeyRequestFactory() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 25a204ded27..c9c664b303f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -381,7 +381,6 @@ vol, bucket, key, volOwner, bucketOwner, createUGIForApi(), */ @VisibleForTesting public UserGroupInformation createUGI() throws AuthenticationException { - if (userGroupInformation != null) { return userGroupInformation; } @@ -413,6 +412,11 @@ public UserGroupInformation createUGIForApi() throws OMException { return ugi; } + @VisibleForTesting + public void setUGI(UserGroupInformation ugi) { + this.userGroupInformation = ugi; + } + /** * Return InetAddress created from OMRequest userInfo. If userInfo is not * set, returns null. @@ -438,7 +442,6 @@ public InetAddress getRemoteAddress() throws IOException { * Return String created from OMRequest userInfo. If userInfo is not * set, returns null. 
* @return String - * @throws IOException */ @VisibleForTesting public String getHostName() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java index 26487935a65..1b318354eeb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java @@ -99,4 +99,17 @@ private static boolean checkInSnapshotCache( } return false; } + + public static boolean shouldLogClientRequestFailure(IOException exception) { + if (!(exception instanceof OMException)) { + return true; + } + OMException omException = (OMException) exception; + switch (omException.getResult()) { + case KEY_NOT_FOUND: + return false; + default: + return true; + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java index 9ae6b7e5d50..f73255da117 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -45,7 +45,6 @@ public interface RequestAuditor { * @param auditMap * @param throwable * @param userInfo - * @return */ OMAuditLogger.Builder buildAuditMessage( AuditAction op, Map auditMap, Throwable throwable, UserInfo userInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 72c5cf57d99..3c21a2a851b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -66,10 +66,12 @@ import java.nio.file.InvalidPathException; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; @@ -246,8 +248,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omBucketInfo.setUpdateID(transactionLogIndex, ozoneManager.isRatisEnabled()); - // Add default acls from volume. - addDefaultAcls(omBucketInfo, omVolumeArgs); + addDefaultAcls(omBucketInfo, omVolumeArgs, ozoneManager); // check namespace quota checkQuotaInNamespace(omVolumeArgs, 1L); @@ -322,16 +323,20 @@ private boolean isECBucket(BucketInfo bucketInfo) { * @param omVolumeArgs */ private void addDefaultAcls(OmBucketInfo omBucketInfo, - OmVolumeArgs omVolumeArgs) { - // Add default acls for bucket creator. 
+ OmVolumeArgs omVolumeArgs, OzoneManager ozoneManager) throws OMException { List acls = new ArrayList<>(); + // Add default acls + acls.addAll(getDefaultAclList(createUGIForApi(), ozoneManager.getConfiguration())); if (omBucketInfo.getAcls() != null) { + // Add acls for bucket creator. acls.addAll(omBucketInfo.getAcls()); } // Add default acls from volume. List defaultVolumeAcls = omVolumeArgs.getDefaultAcls(); OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAcls, ACCESS); + // Remove the duplicates + acls = acls.stream().distinct().collect(Collectors.toList()); omBucketInfo.setAcls(acls); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 53d4c83c3a9..732886fa0e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -22,19 +22,11 @@ import java.nio.file.InvalidPathException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -74,7 +66,6 @@ import org.apache.hadoop.util.Time; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; @@ -89,11 +80,6 @@ public class OMDirectoryCreateRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(OMDirectoryCreateRequest.class); - // The maximum number of directories which can be created through a single - // transaction (recursive directory creations) is 2^8 - 1 as only 8 - // bits are set aside for this in ObjectID. - private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255; - /** * Stores the result of request execution in * OMClientRequest#validateAndUpdateCache. 
@@ -117,8 +103,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { super.preExecute(ozoneManager).getCreateDirectoryRequest(); Preconditions.checkNotNull(createDirectoryRequest); - OmUtils.verifyKeyNameWithSnapshotReservedWord( - createDirectoryRequest.getKeyArgs().getKeyName()); + KeyArgs keyArgs = createDirectoryRequest.getKeyArgs(); + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setSnapshotReservedWord(keyArgs.getKeyName()).build(); + validateKey(ozoneManager, validateArgs); KeyArgs.Builder newKeyArgs = createDirectoryRequest.getKeyArgs() .toBuilder().setModificationTime(Time.now()); @@ -202,7 +190,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn dirKeyInfo = createDirectoryKeyInfoWithACL(keyName, keyArgs, baseObjId, omBucketInfo, omPathInfo, trxnLogIndex, - ozoneManager.getDefaultReplicationConfig()); + ozoneManager.getDefaultReplicationConfig(), ozoneManager.getConfiguration()); missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, missingParents, omBucketInfo, omPathInfo, trxnLogIndex); @@ -249,58 +237,6 @@ dirKeyInfo, missingParentInfos, result, getBucketLayout(), return omClientResponse; } - /** - * Construct OmKeyInfo for every parent directory in missing list. - * @param ozoneManager - * @param keyArgs - * @param missingParents list of parent directories to be created - * @param bucketInfo - * @param omPathInfo - * @param trxnLogIndex - * @return - * @throws IOException - */ - public static List getAllParentInfo(OzoneManager ozoneManager, - KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex) - throws IOException { - List missingParentInfos = new ArrayList<>(); - - // The base id is left shifted by 8 bits for creating space to - // create (2^8 - 1) object ids in every request. - // maxObjId represents the largest object id allocation possible inside - // the transaction. - long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); - long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; - long objectCount = 1; // baseObjID is used by the leaf directory - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - for (String missingKey : missingParents) { - long nextObjId = baseObjId + objectCount; - if (nextObjId > maxObjId) { - throw new OMException("Too many directories in path. Exceeds limit of " - + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: " - + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, - INVALID_KEY_NAME); - } - - LOG.debug("missing parent {} getting added to KeyTable", missingKey); - - OmKeyInfo parentKeyInfo = - createDirectoryKeyInfoWithACL(missingKey, keyArgs, nextObjId, - bucketInfo, omPathInfo, trxnLogIndex, - ozoneManager.getDefaultReplicationConfig()); - objectCount++; - - missingParentInfos.add(parentKeyInfo); - } - - return missingParentInfos; - } - private void logResult(CreateDirectoryRequest createDirectoryRequest, KeyArgs keyArgs, OMMetrics omMetrics, Result result, Exception exception, int numMissingParents) { @@ -335,69 +271,6 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, } } - /** - * fill in a KeyInfo for a new directory entry in OM database. - * without initializing ACLs from the KeyArgs - used for intermediate - * directories which get created internally/recursively during file - * and directory create. 
- * @param keyName - * @param keyArgs - * @param objectId - * @param bucketInfo - * @param omPathInfo - * @param transactionIndex - * @param serverDefaultReplConfig - * @return the OmKeyInfo structure - */ - public static OmKeyInfo createDirectoryKeyInfoWithACL(String keyName, - KeyArgs keyArgs, long objectId, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfo omPathInfo, long transactionIndex, - ReplicationConfig serverDefaultReplConfig) { - return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId, - serverDefaultReplConfig) - .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo)) - .setUpdateID(transactionIndex).build(); - } - - private static OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName, - KeyArgs keyArgs, long objectId, - ReplicationConfig serverDefaultReplConfig) { - String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - - OmKeyInfo.Builder keyInfoBuilder = - new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(dirName) - .setOwnerName(keyArgs.getOwnerName()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setDataSize(0); - if (keyArgs.getFactor() != null && keyArgs - .getFactor() != HddsProtos.ReplicationFactor.ZERO && keyArgs - .getType() != HddsProtos.ReplicationType.EC) { - // Factor available and not an EC replication config. - keyInfoBuilder.setReplicationConfig(ReplicationConfig - .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor())); - } else if (keyArgs.getType() == HddsProtos.ReplicationType.EC) { - // Found EC type - keyInfoBuilder.setReplicationConfig( - new ECReplicationConfig(keyArgs.getEcReplicationConfig())); - } else { - // default type - keyInfoBuilder.setReplicationConfig(serverDefaultReplConfig); - } - - keyInfoBuilder.setObjectID(objectId); - return keyInfoBuilder; - } - - static long getMaxNumOfRecursiveDirs() { - return MAX_NUM_OF_RECURSIVE_DIRS; - } - @RequestFeatureValidator( conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION, processingPhase = RequestProcessingPhase.PRE_PROCESS, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 46a2ac5f7cc..8bef8e17928 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -51,12 +51,10 @@ import java.nio.file.InvalidPathException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.ArrayList; import java.util.List; import java.util.Map; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; @@ -145,8 +143,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn 
OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // prepare all missing parents - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( + missingParentInfos = getAllMissingParentDirInfo( ozoneManager, keyArgs, omBucketInfo, omPathInfo, trxnLogIndex); final long volumeId = omMetadataManager.getVolumeId(volumeName); @@ -163,7 +160,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omPathInfo.getLeafNodeName(), keyArgs, omPathInfo.getLeafNodeObjectId(), omPathInfo.getLastKnownParentId(), trxnLogIndex, - omBucketInfo, omPathInfo); + omBucketInfo, omPathInfo, ozoneManager.getConfiguration()); OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, volumeId, bucketId, trxnLogIndex, missingParentInfos, dirInfo); @@ -235,86 +232,4 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, createDirectoryRequest); } } - - /** - * Construct OmDirectoryInfo for every parent directory in missing list. - * - * @param keyArgs key arguments - * @param pathInfo list of parent directories to be created and its ACLs - * @param trxnLogIndex transaction log index id - * @return list of missing parent directories - * @throws IOException DB failure - */ - public static List getAllMissingParentDirInfo( - OzoneManager ozoneManager, KeyArgs keyArgs, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex) - throws IOException { - List missingParentInfos = new ArrayList<>(); - - // The base id is left shifted by 8 bits for creating space to - // create (2^8 - 1) object ids in every request. - // maxObjId represents the largest object id allocation possible inside - // the transaction. - long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); - long maxObjId = baseObjId + getMaxNumOfRecursiveDirs(); - long objectCount = 1; - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - long lastKnownParentId = pathInfo.getLastKnownParentId(); - List missingParents = pathInfo.getMissingParents(); - for (String missingKey : missingParents) { - long nextObjId = baseObjId + objectCount; - if (nextObjId > maxObjId) { - throw new OMException("Too many directories in path. Exceeds limit of " - + getMaxNumOfRecursiveDirs() + ". Unable to create directory: " - + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, - INVALID_KEY_NAME); - } - - LOG.debug("missing parent {} getting added to DirectoryTable", - missingKey); - OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey, - keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, - bucketInfo, pathInfo); - objectCount++; - - missingParentInfos.add(dirInfo); - - // updating id for the next sub-dir - lastKnownParentId = nextObjId; - } - pathInfo.setLastKnownParentId(lastKnownParentId); - pathInfo.setLeafNodeObjectId(baseObjId + objectCount); - return missingParentInfos; - } - - /** - * Fill in a DirectoryInfo for a new directory entry in OM database. 
- * @param dirName - * @param keyArgs - * @param objectId - * @param parentObjectId - * @param bucketInfo - * @param omPathInfo - * @return the OmDirectoryInfo structure - */ - private static OmDirectoryInfo createDirectoryInfoWithACL( - String dirName, KeyArgs keyArgs, long objectId, - long parentObjectId, long transactionIndex, - OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo) { - - return OmDirectoryInfo.newBuilder() - .setName(dirName) - .setOwner(keyArgs.getOwnerName()) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setObjectID(objectId) - .setUpdateID(transactionIndex) - .setParentObjectID(parentObjectId) - .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo)) - .build(); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index c13af319c5c..08b25718288 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -30,9 +30,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -93,16 +91,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { Preconditions.checkNotNull(createFileRequest); KeyArgs keyArgs = createFileRequest.getKeyArgs(); - - // Verify key name - OmUtils.verifyKeyNameWithSnapshotReservedWord(keyArgs.getKeyName()); - final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() - .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, - OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if (checkKeyNameEnabled) { - OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), - OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)); - } + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setSnapshotReservedWord(keyArgs.getKeyName()) + .setKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), + OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)).build(); + validateKey(ozoneManager, validateArgs); UserInfo userInfo = getUserInfo(); if (keyArgs.getKeyName().length() == 0) { @@ -254,7 +247,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), omBucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(omBucketInfo, keyArgs); long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); @@ -262,8 +255,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, clientID); - missingParentInfos = OMDirectoryCreateRequest - .getAllParentInfo(ozoneManager, keyArgs, + 
missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, pathInfo.getMissingParents(), omBucketInfo, pathInfo, trxnLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index 291b0a8d537..c4967d5af1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -153,9 +153,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omMetadataManager.getBucketKey(volumeName, bucketName)); // add all missing parents to dir table - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); + missingParentInfos = getAllMissingParentDirInfo( + ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); // total number of keys created. numKeysCreated = missingParentInfos.size(); @@ -171,7 +170,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); long openVersion = omFileInfo.getLatestVersionLocations().getVersion(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 3e7549b176e..8f2a768c525 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -1050,7 +1050,7 @@ public static long getParentID(long volumeId, long bucketId, String keyName, * @param volumeName - volume name. * @param bucketName - bucket name. * @param keyName - key name. 
- * @return + * @return {@code long} * @throws IOException */ public static long getParentId(OMMetadataManager omMetadataManager, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index 2c182a6a5f5..29ed5d9fc7b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -24,13 +24,17 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; + import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -43,8 +47,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; /** * Handles purging of keys from OM DB. @@ -64,21 +70,34 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn List purgeRequests = purgeDirsRequest.getDeletedPathList(); - - SnapshotInfo fromSnapshotInfo = null; Set> lockSet = new HashSet<>(); Map, OmBucketInfo> volBucketInfoMap = new HashMap<>(); - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); Map openKeyInfoMap = new HashMap<>(); - OMMetrics omMetrics = ozoneManager.getMetrics(); + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + final SnapshotInfo fromSnapshotInfo; try { - if (fromSnapshot != null) { - fromSnapshotInfo = ozoneManager.getMetadataManager() - .getSnapshotInfoTable() - .get(fromSnapshot); + fromSnapshotInfo = fromSnapshot != null ? SnapshotUtils.getSnapshotInfo(ozoneManager, + fromSnapshot) : null; + // Checking if this request is an old request or a new one. + // Validating the previous snapshot because, while purging deleted entries, a snapshot create request could make this purge + // directory request invalid on AOS, since the deletedDirectory would be in the newly created snapshot. Marking + // subdirectories as deleted could then prevent reclaiming those sub-files and subdirectories, since the + // file/directory would still be present in the newly created snapshot. + // Validating the previous snapshot ensures the chain hasn't changed.
+ UUID expectedPreviousSnapshotId = purgeDirsRequest.getExpectedPreviousSnapshotID().hasUuid() + ? fromProtobuf(purgeDirsRequest.getExpectedPreviousSnapshotID().getUuid()) : null; + validatePreviousSnapshotId(fromSnapshotInfo, omMetadataManager.getSnapshotChainManager(), + expectedPreviousSnapshotId); } - + } catch (IOException e) { + LOG.error("Error occurred while performing OMDirectoriesPurge. ", e); + return new OMDirectoriesPurgeResponseWithFSO(createErrorOMResponse(omResponse, e)); + } + try { for (OzoneManagerProtocolProtos.PurgePathRequest path : purgeRequests) { for (OzoneManagerProtocolProtos.KeyInfo key : path.getMarkDeletedSubDirsList()) { @@ -150,6 +169,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } } + if (fromSnapshotInfo != null) { + fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); + } } catch (IOException ex) { // Case of IOException for fromProtobuf will not happen // as this is created and send within OM @@ -165,12 +189,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } - OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( - getOmRequest()); - OMClientResponse omClientResponse = new OMDirectoriesPurgeResponseWithFSO( + return new OMDirectoriesPurgeResponseWithFSO( omResponse.build(), purgeRequests, ozoneManager.isRatisEnabled(), getBucketLayout(), volBucketInfoMap, fromSnapshotInfo, openKeyInfoMap); - - return omClientResponse; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index b8bf89a3542..87d126de98a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -28,11 +28,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -96,19 +94,18 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { KeyArgs keyArgs = commitKeyRequest.getKeyArgs(); - // Verify key name - final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() - .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, - OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if (checkKeyNameEnabled) { - OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), - OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)); + if (keyArgs.hasExpectedDataGeneration()) { + ozoneManager.checkFeatureEnabled(OzoneManagerVersion.ATOMIC_REWRITE_KEY); } + + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), + 
OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)).build(); + validateKey(ozoneManager, validateArgs); + boolean isHsync = commitKeyRequest.hasHsync() && commitKeyRequest.getHsync(); boolean isRecovery = commitKeyRequest.hasRecovery() && commitKeyRequest.getRecovery(); - boolean enableHsync = ozoneManager.getConfiguration().getBoolean( - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED_DEFAULT); + boolean enableHsync = OzoneFSUtils.canEnableHsync(ozoneManager.getConfiguration(), false); // If hsynced is called for a file, then this file is hsynced, otherwise it's not hsynced. // Currently, file lease recovery by design only supports recover hsynced file @@ -456,7 +453,6 @@ protected List getOmKeyLocationInfos( * @param omMetrics om metrics * @param exception exception trace * @param omKeyInfo omKeyInfo - * @param result result * @param result stores the result of the execution */ @SuppressWarnings("parameternumber") @@ -553,7 +549,7 @@ public static OMRequest blockCommitKeyWithBucketLayoutFromOldClient( public static OMRequest disallowHsync( OMRequest req, ValidationContext ctx) throws OMException { if (!ctx.versionManager() - .isAllowed(OMLayoutFeature.HSYNC)) { + .isAllowed(OMLayoutFeature.HBASE_SUPPORT)) { CommitKeyRequest commitKeyRequest = req.getCommitKeyRequest(); boolean isHSync = commitKeyRequest.hasHsync() && commitKeyRequest.getHsync(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index d0ed0eacecd..e817901c22e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -28,14 +28,12 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.lock.OzoneLockStrategy; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; @@ -93,15 +91,15 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { KeyArgs keyArgs = createKeyRequest.getKeyArgs(); - // Verify key name - OmUtils.verifyKeyNameWithSnapshotReservedWord(keyArgs.getKeyName()); - final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() - .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, - OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if (checkKeyNameEnabled) { - OmUtils.validateKeyName(keyArgs.getKeyName()); + if (keyArgs.hasExpectedDataGeneration()) { + ozoneManager.checkFeatureEnabled(OzoneManagerVersion.ATOMIC_REWRITE_KEY); } + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setSnapshotReservedWord(keyArgs.getKeyName()) + .setKeyName(keyArgs.getKeyName()).build(); + validateKey(ozoneManager, validateArgs); + String keyPath = 
keyArgs.getKeyName(); keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), keyPath, getBucketLayout()); @@ -262,9 +260,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn " as there is already file in the given path", NOT_A_FILE); } - missingParentInfos = OMDirectoryCreateRequest - .getAllParentInfo(ozoneManager, keyArgs, - pathInfo.getMissingParents(), bucketInfo, + missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, + pathInfo.getMissingParents(), bucketInfo, pathInfo, trxnLogIndex); numMissingParents = missingParentInfos.size(); @@ -280,7 +277,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), - ozoneManager.isRatisEnabled(), replicationConfig); + ozoneManager.isRatisEnabled(), replicationConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index b370c286e0f..87cc151351e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -138,9 +137,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omMetadataManager.getBucketKey(volumeName, bucketName)); // add all missing parents to dir table - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); + missingParentInfos = getAllMissingParentDirInfo( + ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); // total number of keys created. numKeysCreated = missingParentInfos.size(); @@ -156,7 +154,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); @@ -246,7 +244,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn * @param keyName - key name. * @param uploadID - Multi part upload ID for this key. 
* @param omMetadataManager - * @return + * @return {@code String} * @throws IOException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 9ed92183968..a5e8cb14525 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -21,6 +21,12 @@ import java.io.IOException; import java.util.ArrayList; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -37,6 +43,10 @@ import org.slf4j.LoggerFactory; import java.util.List; +import java.util.UUID; + +import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; /** * Handles purging of keys from OM DB. @@ -53,38 +63,60 @@ public OMKeyPurgeRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest(); - List bucketDeletedKeysList = purgeKeysRequest - .getDeletedKeysList(); - List keysToUpdateList = purgeKeysRequest - .getKeysToUpdateList(); - String fromSnapshot = purgeKeysRequest.hasSnapshotTableKey() ? - purgeKeysRequest.getSnapshotTableKey() : null; - List keysToBePurgedList = new ArrayList<>(); + List bucketDeletedKeysList = purgeKeysRequest.getDeletedKeysList(); + List keysToUpdateList = purgeKeysRequest.getKeysToUpdateList(); + String fromSnapshot = purgeKeysRequest.hasSnapshotTableKey() ? purgeKeysRequest.getSnapshotTableKey() : null; + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); - OMClientResponse omClientResponse = null; - for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { - for (String deletedKey : bucketWithDeleteKeys.getKeysList()) { - keysToBePurgedList.add(deletedKey); + + final SnapshotInfo fromSnapshotInfo; + try { + fromSnapshotInfo = fromSnapshot != null ? SnapshotUtils.getSnapshotInfo(ozoneManager, + fromSnapshot) : null; + // Checking if this request is an old request or a new one. + // Validating the previous snapshot because, while purging deleted keys, a snapshot create request could make this purge + // key request invalid on AOS, since the deletedKey would be in the newly created snapshot. This would add a + // redundant tombstone entry in the deletedTable. It is better to skip the transaction. + UUID expectedPreviousSnapshotId = purgeKeysRequest.getExpectedPreviousSnapshotID().hasUuid() + ?
fromProtobuf(purgeKeysRequest.getExpectedPreviousSnapshotID().getUuid()) : null; + validatePreviousSnapshotId(fromSnapshotInfo, omMetadataManager.getSnapshotChainManager(), + expectedPreviousSnapshotId); } + } catch (IOException e) { + LOG.error("Error occurred while performing OmKeyPurge. ", e); + return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); + } + + List keysToBePurgedList = new ArrayList<>(); + + for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { + keysToBePurgedList.addAll(bucketWithDeleteKeys.getKeysList()); + } + + if (keysToBePurgedList.isEmpty()) { + return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, + new OMException("None of the keys can be purged since a new snapshot was created for all the " + + "buckets, making this request invalid", OMException.ResultCodes.KEY_DELETION_ERROR))); } + // Set the transaction info on the snapshot; this prevents duplicate purge requests to OM from background + // services. try { - SnapshotInfo fromSnapshotInfo = null; - if (fromSnapshot != null) { - fromSnapshotInfo = ozoneManager.getMetadataManager() - .getSnapshotInfoTable().get(fromSnapshot); + if (fromSnapshotInfo != null) { + fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); } - omClientResponse = new OMKeyPurgeResponse(omResponse.build(), - keysToBePurgedList, fromSnapshotInfo, keysToUpdateList); - } catch (IOException ex) { - omClientResponse = new OMKeyPurgeResponse( - createErrorOMResponse(omResponse, ex)); + } catch (IOException e) { + return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); } - return omClientResponse; + return new OMKeyPurgeResponse(omResponse.build(), + keysToBePurgedList, fromSnapshotInfo, keysToUpdateList); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index 804e536d21f..35940f5a770 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -24,9 +24,7 @@ import com.google.common.base.Preconditions; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; @@ -83,15 +81,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .getRenameKeyRequest(); Preconditions.checkNotNull(renameKeyRequest); - // Verify key name - final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() - .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, - OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if (checkKeyNameEnabled) { - OmUtils.validateKeyName(renameKeyRequest.getToKeyName()); - } - KeyArgs renameKeyArgs = renameKeyRequest.getKeyArgs(); + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setKeyName(renameKeyRequest.getToKeyName()).build(); + 
validateKey(ozoneManager, validateArgs); String srcKey = extractSrcKey(renameKeyArgs); String dstKey = extractDstKey(renameKeyRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java index 72365221d3b..e57b6d99fd4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -395,7 +395,7 @@ private Map buildAuditMap( * level, e.g. source is /vol1/buck1/dir1/key1 and dest is /vol1/buck1). * * @param request - * @return + * @return {@code String} * @throws OMException */ @Override @@ -410,7 +410,7 @@ protected String extractDstKey(RenameKeyRequest request) throws OMException { * Returns the validated and normalized source key name. * * @param keyArgs - * @return + * @return {@code String} * @throws OMException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 09e5d8bca06..6a467f3acf5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -39,6 +39,8 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -47,16 +49,19 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.PrefixManager; import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockStrategy; @@ -98,9 +103,11 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes 
.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.util.Time.monotonicNow; @@ -109,6 +116,11 @@ */ public abstract class OMKeyRequest extends OMClientRequest { + // The maximum number of directories which can be created through a single + // transaction (recursive directory creations) is 2^8 - 1 as only 8 + // bits are set aside for this in ObjectID. + private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255; + @VisibleForTesting public static final Logger LOG = LoggerFactory.getLogger(OMKeyRequest.class); @@ -176,6 +188,80 @@ protected KeyArgs resolveBucketAndCheckOpenKeyAcls(KeyArgs keyArgs, return resolvedArgs; } + /** + * Define the parameters carried when verifying the Key. + */ + public static class ValidateKeyArgs { + private String snapshotReservedWord; + private String keyName; + private boolean validateSnapshotReserved; + private boolean validateKeyName; + + ValidateKeyArgs(String snapshotReservedWord, String keyName, + boolean validateSnapshotReserved, boolean validateKeyName) { + this.snapshotReservedWord = snapshotReservedWord; + this.keyName = keyName; + this.validateSnapshotReserved = validateSnapshotReserved; + this.validateKeyName = validateKeyName; + } + + public String getSnapshotReservedWord() { + return snapshotReservedWord; + } + + public String getKeyName() { + return keyName; + } + + public boolean isValidateSnapshotReserved() { + return validateSnapshotReserved; + } + + public boolean isValidateKeyName() { + return validateKeyName; + } + + /** + * Tools for building {@link ValidateKeyArgs}. + */ + public static class Builder { + private String snapshotReservedWord; + private String keyName; + private boolean validateSnapshotReserved; + private boolean validateKeyName; + + public Builder setSnapshotReservedWord(String snapshotReservedWord) { + this.snapshotReservedWord = snapshotReservedWord; + this.validateSnapshotReserved = true; + return this; + } + + public Builder setKeyName(String keyName) { + this.keyName = keyName; + this.validateKeyName = true; + return this; + } + + public ValidateKeyArgs build() { + return new ValidateKeyArgs(snapshotReservedWord, keyName, + validateSnapshotReserved, validateKeyName); + } + } + } + + protected void validateKey(OzoneManager ozoneManager, ValidateKeyArgs validateKeyArgs) + throws OMException { + if (validateKeyArgs.isValidateSnapshotReserved()) { + OmUtils.verifyKeyNameWithSnapshotReservedWord(validateKeyArgs.getSnapshotReservedWord()); + } + final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() + .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, + OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); + if (validateKeyArgs.isValidateKeyName() && checkKeyNameEnabled) { + OmUtils.validateKeyName(validateKeyArgs.getKeyName()); + } + } + /** * This methods avoids multiple rpc calls to SCM by allocating multiple blocks * in one rpc call. 
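
The new ValidateKeyArgs/validateKey pair centralizes the key-name checks that individual requests previously inlined. A minimal usage sketch, based only on the builder and validateKey method added in the hunk above; toKeyName stands in for whatever name a given request validates, and the snapshot-reserved-word setter is optional and shown purely for illustration:

    // Inside a subclass of OMKeyRequest, typically in preExecute():
    ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder()
        .setKeyName(toKeyName)               // enables the character check, gated by
                                             // OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY
        .setSnapshotReservedWord(toKeyName)  // optional: also reject the snapshot reserved word
        .build();
    validateKey(ozoneManager, validateArgs); // throws OMException on an invalid name

Each setter flips its corresponding validate flag, so a request only pays for the checks it explicitly asks for.
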
@@ -325,11 +411,12 @@ public EncryptedKeyVersion run() throws IOException { return edek; } - protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, + protected List getAclsForKey(KeyArgs keyArgs, OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo, - PrefixManager prefixManager) { + PrefixManager prefixManager, OzoneConfiguration config) throws OMException { List acls = new ArrayList<>(); + acls.addAll(getDefaultAclList(createUGIForApi(), config)); if (keyArgs.getAclsList() != null) { acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); } @@ -347,6 +434,8 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); if (prefixInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls(), ACCESS)) { + // Remove the duplicates + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } @@ -357,6 +446,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // prefix are not set if (omPathInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls(), ACCESS)) { + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } @@ -365,10 +455,12 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // parent-dir are not set. if (bucketInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), ACCESS)) { + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } @@ -377,12 +469,15 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, * @param keyArgs * @param bucketInfo * @param omPathInfo + * @param config * @return Acls which inherited parent DEFAULT and keyArgs ACCESS acls. */ - protected static List getAclsForDir(KeyArgs keyArgs, - OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo) { + protected List getAclsForDir(KeyArgs keyArgs, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, OzoneConfiguration config) throws OMException { // Acls inherited from parent or bucket will convert to DEFAULT scope List acls = new ArrayList<>(); + // add default ACLs + acls.addAll(getDefaultAclList(createUGIForApi(), config)); // Inherit DEFAULT acls from parent-dir if (omPathInfo != null) { @@ -395,12 +490,207 @@ protected static List getAclsForDir(KeyArgs keyArgs, OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), DEFAULT); } - // add itself acls + // add acls from clients acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); - + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } + /** + * Construct OmDirectoryInfo for every parent directory in missing list. + * + * @param keyArgs key arguments + * @param pathInfo list of parent directories to be created and its ACLs + * @param trxnLogIndex transaction log index id + * @return list of missing parent directories + * @throws IOException DB failure + */ + protected List getAllMissingParentDirInfo( + OzoneManager ozoneManager, KeyArgs keyArgs, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex) + throws IOException { + List missingParentInfos = new ArrayList<>(); + + // The base id is left shifted by 8 bits for creating space to + // create (2^8 - 1) object ids in every request. + // maxObjId represents the largest object id allocation possible inside + // the transaction. 
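
The ID-window comment above is easier to follow with concrete numbers. A standalone sketch of the arithmetic, with illustrative values and assuming getObjectIdFromTxId left-shifts the transaction index by 8 bits as the comment states:

    long trxnLogIndex = 42L;
    long baseObjId = trxnLogIndex << 8;   // 10752: base ID reserved for this transaction
    long maxObjId = baseObjId + 255;      // room for MAX_NUM_OF_RECURSIVE_DIRS parents
    long objectCount = 1;                 // first missing parent gets baseObjId + 1
    // A path needing a 256th missing parent would require baseObjId + 256 > maxObjId,
    // which is exactly the condition that makes the helpers below throw INVALID_KEY_NAME.
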
+ long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); + long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; + long objectCount = 1; + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + long lastKnownParentId = pathInfo.getLastKnownParentId(); + List missingParents = pathInfo.getMissingParents(); + for (String missingKey : missingParents) { + long nextObjId = baseObjId + objectCount; + if (nextObjId > maxObjId) { + throw new OMException("Too many directories in path. Exceeds limit of " + + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: " + + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, + INVALID_KEY_NAME); + } + + LOG.debug("missing parent {} getting added to DirectoryTable", + missingKey); + OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey, + keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, + bucketInfo, pathInfo, ozoneManager.getConfiguration()); + objectCount++; + + missingParentInfos.add(dirInfo); + + // updating id for the next sub-dir + lastKnownParentId = nextObjId; + } + pathInfo.setLastKnownParentId(lastKnownParentId); + pathInfo.setLeafNodeObjectId(baseObjId + objectCount); + return missingParentInfos; + } + + /** + * Construct OmKeyInfo for every parent directory in missing list. + * @param ozoneManager + * @param keyArgs + * @param missingParents list of parent directories to be created + * @param bucketInfo + * @param omPathInfo + * @param trxnLogIndex + * @return {@code List} + * @throws IOException + */ + protected List getAllParentInfo(OzoneManager ozoneManager, + KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex) + throws IOException { + List missingParentInfos = new ArrayList<>(); + + // The base id is left shifted by 8 bits for creating space to + // create (2^8 - 1) object ids in every request. + // maxObjId represents the largest object id allocation possible inside + // the transaction. + long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); + long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; + long objectCount = 1; // baseObjID is used by the leaf directory + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + for (String missingKey : missingParents) { + long nextObjId = baseObjId + objectCount; + if (nextObjId > maxObjId) { + throw new OMException("Too many directories in path. Exceeds limit of " + + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: " + + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, + INVALID_KEY_NAME); + } + + LOG.debug("missing parent {} getting added to KeyTable", missingKey); + + OmKeyInfo parentKeyInfo = + createDirectoryKeyInfoWithACL(missingKey, keyArgs, nextObjId, + bucketInfo, omPathInfo, trxnLogIndex, + ozoneManager.getDefaultReplicationConfig(), ozoneManager.getConfiguration()); + objectCount++; + + missingParentInfos.add(parentKeyInfo); + } + + return missingParentInfos; + } + + /** + * Fill in a DirectoryInfo for a new directory entry in OM database. 
+ * @param dirName + * @param keyArgs + * @param objectId + * @param parentObjectId + * @param bucketInfo + * @param omPathInfo + * @param config + * @return the OmDirectoryInfo structure + */ + @SuppressWarnings("parameternumber") + protected OmDirectoryInfo createDirectoryInfoWithACL( + String dirName, KeyArgs keyArgs, long objectId, + long parentObjectId, long transactionIndex, + OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo, + OzoneConfiguration config) throws OMException { + return OmDirectoryInfo.newBuilder() + .setName(dirName) + .setOwner(keyArgs.getOwnerName()) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setObjectID(objectId) + .setUpdateID(transactionIndex) + .setParentObjectID(parentObjectId).setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo, config)) + .build(); + } + + /** + * fill in a KeyInfo for a new directory entry in OM database. + * without initializing ACLs from the KeyArgs - used for intermediate + * directories which get created internally/recursively during file + * and directory create. + * @param keyName + * @param keyArgs + * @param objectId + * @param bucketInfo + * @param omPathInfo + * @param transactionIndex + * @param serverDefaultReplConfig + * @param config + * @return the OmKeyInfo structure + */ + @SuppressWarnings("parameternumber") + protected OmKeyInfo createDirectoryKeyInfoWithACL(String keyName, + KeyArgs keyArgs, long objectId, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long transactionIndex, + ReplicationConfig serverDefaultReplConfig, OzoneConfiguration config) throws OMException { + return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId, + serverDefaultReplConfig) + .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo, config)) + .setUpdateID(transactionIndex).build(); + } + + protected OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName, KeyArgs keyArgs, long objectId, + ReplicationConfig serverDefaultReplConfig) { + String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + + OmKeyInfo.Builder keyInfoBuilder = + new OmKeyInfo.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(dirName) + .setOwnerName(keyArgs.getOwnerName()) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, new ArrayList<>()))) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setDataSize(0); + if (keyArgs.getFactor() != null && keyArgs + .getFactor() != HddsProtos.ReplicationFactor.ZERO && keyArgs + .getType() != HddsProtos.ReplicationType.EC) { + // Factor available and not an EC replication config. + keyInfoBuilder.setReplicationConfig(ReplicationConfig + .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor())); + } else if (keyArgs.getType() == HddsProtos.ReplicationType.EC) { + // Found EC type + keyInfoBuilder.setReplicationConfig( + new ECReplicationConfig(keyArgs.getEcReplicationConfig())); + } else { + // default type + keyInfoBuilder.setReplicationConfig(serverDefaultReplConfig); + } + + keyInfoBuilder.setObjectID(objectId); + return keyInfoBuilder; + } + /** * Check Acls for the ozone bucket. * @param ozoneManager @@ -611,7 +901,7 @@ protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs, /** * Get FileEncryptionInfoProto from KeyArgs. 
* @param keyArgs - * @return + * @return FileEncryptionInfo */ protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) { FileEncryptionInfo encryptionInfo = null; @@ -623,7 +913,7 @@ protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) { /** * Check bucket quota in bytes. - * @paran metadataManager + * @param metadataManager * @param omBucketInfo * @param allocateSize * @throws IOException @@ -726,12 +1016,12 @@ protected OmKeyInfo prepareKeyInfo( @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, long transactionLogIndex, long objectID, boolean isRatisEnabled, - ReplicationConfig replicationConfig) + ReplicationConfig replicationConfig, OzoneConfiguration config) throws IOException { return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size, locations, encInfo, prefixManager, omBucketInfo, omPathInfo, - transactionLogIndex, objectID, isRatisEnabled, replicationConfig); + transactionLogIndex, objectID, isRatisEnabled, replicationConfig, config); } /** @@ -749,12 +1039,12 @@ protected OmKeyInfo prepareFileInfo( @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, long transactionLogIndex, long objectID, - boolean isRatisEnabled, ReplicationConfig replicationConfig) - throws IOException { + boolean isRatisEnabled, ReplicationConfig replicationConfig, + OzoneConfiguration config) throws IOException { if (keyArgs.getIsMultipartKey()) { return prepareMultipartFileInfo(omMetadataManager, keyArgs, size, locations, encInfo, prefixManager, omBucketInfo, - omPathInfo, transactionLogIndex, objectID); + omPathInfo, transactionLogIndex, objectID, config); //TODO args.getMetadata } if (dbKeyInfo != null) { @@ -797,7 +1087,7 @@ protected OmKeyInfo prepareFileInfo( // Blocks will be appended as version 0. return createFileInfo(keyArgs, locations, replicationConfig, keyArgs.getDataSize(), encInfo, prefixManager, - omBucketInfo, omPathInfo, transactionLogIndex, objectID); + omBucketInfo, omPathInfo, transactionLogIndex, objectID, config); } /** @@ -814,7 +1104,8 @@ protected OmKeyInfo createFileInfo( @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, - long transactionLogIndex, long objectID) { + long transactionLogIndex, long objectID, + OzoneConfiguration config) throws OMException { OmKeyInfo.Builder builder = new OmKeyInfo.Builder(); builder.setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) @@ -827,7 +1118,7 @@ protected OmKeyInfo createFileInfo( .setReplicationConfig(replicationConfig) .setFileEncryptionInfo(encInfo) .setAcls(getAclsForKey( - keyArgs, omBucketInfo, omPathInfo, prefixManager)) + keyArgs, omBucketInfo, omPathInfo, prefixManager, config)) .addAllMetadata(KeyValueUtil.getFromProtobuf( keyArgs.getMetadataList())) .addAllTags(KeyValueUtil.getFromProtobuf( @@ -861,8 +1152,8 @@ private OmKeyInfo prepareMultipartFileInfo( FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, - @Nonnull long transactionLogIndex, long objectID) - throws IOException { + @Nonnull long transactionLogIndex, long objectID, + OzoneConfiguration configuration) throws IOException { Preconditions.checkArgument(args.getMultipartNumber() > 0, "PartNumber Should be greater than zero"); @@ -900,7 +1191,7 @@ private OmKeyInfo prepareMultipartFileInfo( // is not an actual key, it is a part of the key. 
return createFileInfo(args, locations, partKeyInfo.getReplicationConfig(), size, encInfo, prefixManager, omBucketInfo, omPathInfo, - transactionLogIndex, objectID); + transactionLogIndex, objectID, configuration); } /** @@ -911,7 +1202,7 @@ private OmKeyInfo prepareMultipartFileInfo( * @param keyName - key name. * @param uploadID - Multi part upload ID for this key. * @param omMetadataManager - * @return + * @return {@code String} * @throws IOException */ protected String getDBMultipartOpenKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index 26c559eef6e..0a2703c769e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -209,7 +209,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, bucketInfo, pathInfo, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(objectID) .setUpdateID(transactionLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index de78c665110..d55a7b41918 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -121,8 +120,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn volumeName, bucketName); // add all missing parents to dir table - missingParentInfos = OMDirectoryCreateRequestWithFSO - .getAllMissingParentDirInfo(ozoneManager, keyArgs, bucketInfo, + missingParentInfos = getAllMissingParentDirInfo(ozoneManager, keyArgs, bucketInfo, pathInfoFSO, transactionLogIndex); // We are adding uploadId to key, because if multiple users try to @@ -185,7 +183,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, bucketInfo, pathInfoFSO, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(pathInfoFSO.getLeafNodeObjectId()) 
.setUpdateID(transactionLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 597a40006f9..2bb77005c95 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.OzoneConfigUtil; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.ratis.server.protocol.TermIndex; @@ -187,8 +186,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest .verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName, keyName, Paths.get(keyName)); - missingParentInfos = OMDirectoryCreateRequestWithFSO - .getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo, + missingParentInfos = getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo, pathInfoFSO, trxnLogIndex); if (missingParentInfos != null) { @@ -236,7 +234,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, omBucketInfo, pathInfoFSO, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(pathInfoFSO.getLeafNodeObjectId()) .setUpdateID(trxnLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java new file mode 100644 index 00000000000..6146e1ac105 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; +import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles delete object tagging request. 
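
The heart of the new request is small; a condensed, illustrative sketch using the same helper calls that appear in the class body below (OBS/legacy layout, lock handling and metrics omitted):

    String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey);
    if (omKeyInfo == null) {
      throw new OMException("Key not found", KEY_NOT_FOUND);
    }
    omKeyInfo.getTags().clear();                                      // drop every tag
    omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
    omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(   // cache now, DB on flush
        new CacheKey<>(dbOzoneKey), CacheValue.get(trxnLogIndex, omKeyInfo));

Everything else in the class is bucket locking, metrics, auditing, and error wrapping around this sequence.
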
+ */ +public class S3DeleteObjectTaggingRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3DeleteObjectTaggingRequest.class); + + public S3DeleteObjectTaggingRequest(OMRequest omRequest, BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + DeleteObjectTaggingRequest deleteObjectTaggingRequest = + super.preExecute(ozoneManager).getDeleteObjectTaggingRequest(); + Preconditions.checkNotNull(deleteObjectTaggingRequest); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + + String keyPath = keyArgs.getKeyName(); + keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), + keyPath, getBucketLayout()); + + KeyArgs.Builder newKeyArgs = + keyArgs.toBuilder() + .setKeyName(keyPath); + + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), + ozoneManager, ACLType.WRITE); + return getOmRequest().toBuilder() + .setUserInfo(getUserInfo()) + .setDeleteObjectTaggingRequest( + deleteObjectTaggingRequest.toBuilder().setKeyArgs(resolvedArgs)) + .build(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = getOmRequest().getDeleteObjectTaggingRequest(); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumDeleteObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + String dbOzoneKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + + OmKeyInfo omKeyInfo = + omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); + if (omKeyInfo == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); + } + + // Clear / delete the tags + omKeyInfo.getTags().clear(); + // Set the UpdateID to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache + omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry( + new CacheKey<>(dbOzoneKey), + CacheValue.get(trxnLogIndex, omKeyInfo) + ); + + omClientResponse = new S3DeleteObjectTaggingResponse( + omResponse.setDeleteObjectTaggingResponse(DeleteObjectTaggingResponse.newBuilder()).build(), + omKeyInfo + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3DeleteObjectTaggingResponse( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + 
mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + markForAudit(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.DELETE_OBJECT_TAGGING, auditMap, exception, getOmRequest().getUserInfo() + )); + + switch (result) { + case SUCCESS: + LOG.debug("Delete object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumDeleteObjectTaggingFails(); + if (OMClientRequestUtils.shouldLogClientRequestFailure(exception)) { + LOG.error("Delete object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + } + break; + default: + LOG.error("Unrecognized Result for S3DeleteObjectTaggingRequest: {}", + deleteObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..fb0561702a6 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3DeleteObjectTaggingResponseWithFSO; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles delete object tagging request for FSO bucket. 
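
The FSO variant differs mainly in how the key is addressed; a short illustrative sketch of that difference, relying on the metadata-manager accessors used in the class body below:

    // FSO buckets address files by (volumeId, bucketId, parentObjectID, fileName)
    // rather than by the flat volume/bucket/key string.
    long volumeId = omMetadataManager.getVolumeId(volumeName);
    long bucketId = omMetadataManager.getBucketId(volumeName, bucketName);
    String dbKey = omMetadataManager.getOzonePathKey(
        volumeId, bucketId, omKeyInfo.getParentObjectID(), omKeyInfo.getFileName());
    // Directories are rejected up front with NOT_SUPPORTED_OPERATION, so only the
    // file table needs the updated OmKeyInfo in its cache.
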
+ */ +public class S3DeleteObjectTaggingRequestWithFSO extends S3DeleteObjectTaggingRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3DeleteObjectTaggingRequestWithFSO.class); + + public S3DeleteObjectTaggingRequestWithFSO(OMRequest omRequest, + BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = getOmRequest().getDeleteObjectTaggingRequest(); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumDeleteObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists( + omMetadataManager, volumeName, bucketName, keyName, 0, + ozoneManager.getDefaultReplicationConfig()); + + if (keyStatus == null) { + throw new OMException("Key not found. Key: " + keyName, ResultCodes.KEY_NOT_FOUND); + } + + boolean isDirectory = keyStatus.isDirectory(); + + if (isDirectory) { + throw new OMException("DeleteObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); + final String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + // Clear / delete the tags + omKeyInfo.getTags().clear(); + // Set the UpdateId to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache for file table. 
No need to check directory table since + // DeleteObjectTagging rejects operations on FSO directory + omMetadataManager.getKeyTable(getBucketLayout()) + .addCacheEntry(new CacheKey<>(dbKey), + CacheValue.get(trxnLogIndex, omKeyInfo)); + + omClientResponse = new S3DeleteObjectTaggingResponseWithFSO( + omResponse.setDeleteObjectTaggingResponse(DeleteObjectTaggingResponse.newBuilder()).build(), + omKeyInfo, volumeId, bucketId + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3DeleteObjectTaggingResponseWithFSO( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + switch (result) { + case SUCCESS: + LOG.debug("Delete object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumDeleteObjectTaggingFails(); + LOG.error("Delete object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + break; + default: + LOG.error("Unrecognized Result for S3DeleteObjectTaggingRequest: {}", + deleteObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java new file mode 100644 index 00000000000..aab67830383 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; +import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3PutObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles put object tagging request. 
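
As with the delete variant, the essential mutation is a few lines; an illustrative sketch highlighting that PutObjectTagging replaces the whole tag set rather than merging, and deliberately leaves the modification time alone:

    omKeyInfo.getTags().clear();
    omKeyInfo.getTags().putAll(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList()));
    omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
    // Modification time is intentionally untouched: S3 last-modified only changes
    // when the object content changes.
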
+ */ +public class S3PutObjectTaggingRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3PutObjectTaggingRequest.class); + + public S3PutObjectTaggingRequest(OMRequest omRequest, BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + PutObjectTaggingRequest putObjectTaggingRequest = + super.preExecute(ozoneManager).getPutObjectTaggingRequest(); + Preconditions.checkNotNull(putObjectTaggingRequest); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + + String keyPath = keyArgs.getKeyName(); + keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), + keyPath, getBucketLayout()); + + KeyArgs.Builder newKeyArgs = + keyArgs.toBuilder() + .setKeyName(keyPath); + + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), + ozoneManager, ACLType.WRITE); + return getOmRequest().toBuilder() + .setUserInfo(getUserInfo()) + .setPutObjectTaggingRequest( + putObjectTaggingRequest.toBuilder().setKeyArgs(resolvedArgs)) + .build(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + PutObjectTaggingRequest putObjectTaggingRequest = getOmRequest().getPutObjectTaggingRequest(); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumPutObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + String dbOzoneKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + + OmKeyInfo omKeyInfo = + omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); + if (omKeyInfo == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); + } + + // Set the tags + omKeyInfo.getTags().clear(); + omKeyInfo.getTags().putAll(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList())); + // Set the UpdateID to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache + omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry( + new CacheKey<>(dbOzoneKey), + CacheValue.get(trxnLogIndex, omKeyInfo) + ); + + omClientResponse = new S3PutObjectTaggingResponse( + omResponse.setPutObjectTaggingResponse(PutObjectTaggingResponse.newBuilder()).build(), + omKeyInfo + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3PutObjectTaggingResponse( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + 
mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + markForAudit(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.PUT_OBJECT_TAGGING, auditMap, exception, getOmRequest().getUserInfo() + )); + + switch (result) { + case SUCCESS: + LOG.debug("Put object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumPutObjectTaggingFails(); + if (OMClientRequestUtils.shouldLogClientRequestFailure(exception)) { + LOG.error("Put object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + } + break; + default: + LOG.error("Unrecognized Result for S3PutObjectTaggingRequest: {}", + putObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..2b6ca8601cb --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3PutObjectTaggingResponseWithFSO; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles put object tagging request for FSO bucket. 
+ */ +public class S3PutObjectTaggingRequestWithFSO extends S3PutObjectTaggingRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3PutObjectTaggingRequestWithFSO.class); + + public S3PutObjectTaggingRequestWithFSO(OMRequest omRequest, + BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + PutObjectTaggingRequest putObjectTaggingRequest = getOmRequest().getPutObjectTaggingRequest(); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumPutObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists( + omMetadataManager, volumeName, bucketName, keyName, 0, + ozoneManager.getDefaultReplicationConfig()); + + if (keyStatus == null) { + throw new OMException("Key not found. Key: " + keyName, ResultCodes.KEY_NOT_FOUND); + } + + boolean isDirectory = keyStatus.isDirectory(); + + if (isDirectory) { + throw new OMException("PutObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); + final String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + // Set the tags + omKeyInfo.getTags().clear(); + omKeyInfo.getTags().putAll(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList())); + // Set the UpdateId to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache for file table. 
No need to check directory table since + // PutObjectTagging rejects operations on FSO directory + omMetadataManager.getKeyTable(getBucketLayout()) + .addCacheEntry(new CacheKey<>(dbKey), + CacheValue.get(trxnLogIndex, omKeyInfo)); + + omClientResponse = new S3PutObjectTaggingResponseWithFSO( + omResponse.setPutObjectTaggingResponse(PutObjectTaggingResponse.newBuilder()).build(), + omKeyInfo, volumeId, bucketId + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3PutObjectTaggingResponseWithFSO( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + switch (result) { + case SUCCESS: + LOG.debug("Put object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumPutObjectTaggingFails(); + LOG.error("Put object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + break; + default: + LOG.error("Unrecognized Result for S3PutObjectTaggingRequest: {}", + putObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java new file mode 100644 index 00000000000..d3f26d195ad --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +/** + * Package contains classes related to S3 tagging requests. + */ +package org.apache.hadoop.ozone.om.request.s3.tagging; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 3aa4151cea3..59cc02b6fdb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -18,9 +18,12 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -74,8 +77,8 @@ public class OMSnapshotCreateRequest extends OMClientRequest { LoggerFactory.getLogger(OMSnapshotCreateRequest.class); private final String snapshotPath; - private final String volumeName; - private final String bucketName; + private String volumeName; + private String bucketName; private final String snapshotName; private final SnapshotInfo snapshotInfo; @@ -105,7 +108,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final OMRequest omRequest = super.preExecute(ozoneManager); // Verify name OmUtils.validateSnapshotName(snapshotName); - + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
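The lines that follow implement this by resolving the link before the ownership/ACL check. A minimal plain-JDK sketch of that ordering (the LinkResolutionSketch class and its link map are illustrative stand-ins, not Ozone's resolveBucketLink API):

import java.util.Map;

// Toy model of "resolve the bucket link first, then authorize against the source bucket".
final class LinkResolutionSketch {
  // Hypothetical link table: "volume/bucket" of the link -> "volume/bucket" it points to.
  static String resolve(Map<String, String> links, String volumeBucket) {
    String current = volumeBucket;
    for (int hops = 0; hops < 10 && links.containsKey(current); hops++) {
      current = links.get(current);   // follow link chains, bounded to keep the sketch simple
    }
    return current;
  }

  public static void main(String[] args) {
    Map<String, String> links = Map.of("s3v/linked-bucket", "vol1/source-bucket");
    String resolved = resolve(links, "s3v/linked-bucket");
    // Ownership/ACL checks must run against the resolved (source) bucket, because the
    // link and the source bucket can carry different owners and permissions.
    System.out.println("authorize against: " + resolved);
  }
}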
+ ResolvedBucket bucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this); + this.volumeName = bucket.realVolume(); + this.bucketName = bucket.realBucket(); UserGroupInformation ugi = createUGIForApi(); String bucketOwner = ozoneManager.getBucketOwner(volumeName, bucketName, IAccessAuthorizer.ACLType.READ, OzoneObj.ResourceType.BUCKET); @@ -115,12 +122,12 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { "Only bucket owners and Ozone admins can create snapshots", OMException.ResultCodes.PERMISSION_DENIED); } - - return omRequest.toBuilder().setCreateSnapshotRequest( - omRequest.getCreateSnapshotRequest().toBuilder() - .setSnapshotId(toProtobuf(UUID.randomUUID())) - .setCreationTime(Time.now()) - .build()).build(); + CreateSnapshotRequest.Builder createSnapshotRequest = omRequest.getCreateSnapshotRequest().toBuilder() + .setSnapshotId(toProtobuf(UUID.randomUUID())) + .setVolumeName(volumeName) + .setBucketName(this.bucketName) + .setCreationTime(Time.now()); + return omRequest.toBuilder().setCreateSnapshotRequest(createSnapshotRequest.build()).build(); } @Override @@ -166,7 +173,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ((RDBStore) omMetadataManager.getStore()).getDb() .getLatestSequenceNumber(); snapshotInfo.setDbTxSequenceNumber(dbLatestSequenceNumber); - + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); // Snapshot referenced size should be bucket's used bytes OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java index a2b00138cf3..95f99c627c4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -82,6 +84,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { String volumeName = deleteSnapshotRequest.getVolumeName(); String bucketName = deleteSnapshotRequest.getBucketName(); + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
+ ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this); + volumeName = resolvedBucket.realVolume(); + bucketName = resolvedBucket.realBucket(); // Permission check UserGroupInformation ugi = createUGIForApi(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java index 122108ad65f..18055bdda40 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java @@ -20,9 +20,11 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -60,7 +62,6 @@ public OMSnapshotMoveDeletedKeysRequest(OMRequest omRequest) { @Override @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = @@ -78,19 +79,26 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OzoneManagerProtocolProtos.OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest()); try { - nextSnapshot = SnapshotUtils.getNextActiveSnapshot(fromSnapshot, - snapshotChainManager, omSnapshotManager); + // Check the snapshot exists. + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot.getTableKey()); + + nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, snapshotInfo); // Get next non-deleted snapshot. - List nextDBKeysList = - moveDeletedKeysRequest.getNextDBKeysList(); - List reclaimKeysList = - moveDeletedKeysRequest.getReclaimKeysList(); - List renamedKeysList = - moveDeletedKeysRequest.getRenamedKeysList(); - List movedDirs = - moveDeletedKeysRequest.getDeletedDirsToMoveList(); + List nextDBKeysList = moveDeletedKeysRequest.getNextDBKeysList(); + List reclaimKeysList = moveDeletedKeysRequest.getReclaimKeysList(); + List renamedKeysList = moveDeletedKeysRequest.getRenamedKeysList(); + List movedDirs = moveDeletedKeysRequest.getDeletedDirsToMoveList(); + // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. 
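The statements that follow do exactly that: both snapshot entries are stamped with this transaction's term/index and re-inserted into the snapshotInfoTable cache at that index. A minimal plain-JDK sketch of the stamping pattern (the Snap class and the term#index string are illustrative stand-ins, not Ozone's SnapshotInfo or TransactionInfo):

import java.util.HashMap;
import java.util.Map;

final class LastTxnStampSketch {
  static final class Snap {
    final String tableKey;
    String lastTxn;
    Snap(String tableKey) { this.tableKey = tableKey; }
  }

  public static void main(String[] args) {
    long term = 3, index = 42;                 // would come from the request's Ratis TermIndex
    String txn = term + "#" + index;           // stand-in for a serialized TransactionInfo

    Snap from = new Snap("/vol1/buck1/snap1");
    Snap next = new Snap("/vol1/buck1/snap2"); // may be null when no later snapshot exists

    // tableKey -> (txn log index, updated value): a toy version of a cache entry at an epoch.
    Map<String, Map.Entry<Long, Snap>> cache = new HashMap<>();
    for (Snap s : new Snap[] {from, next}) {
      if (s == null) {
        continue;                              // skip the missing next snapshot
      }
      s.lastTxn = txn;                         // record the last transaction that touched this snapshot
      cache.put(s.tableKey, Map.entry(index, s));
    }
    System.out.println(from.lastTxn + " " + cache.keySet());
  }
}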
+ fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshot)); + if (nextSnapshot != null) { + nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), nextSnapshot)); + } omClientResponse = new OMSnapshotMoveDeletedKeysResponse( omResponse.build(), fromSnapshot, nextSnapshot, nextDBKeysList, reclaimKeysList, renamedKeysList, movedDirs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java new file mode 100644 index 00000000000..0eb0d3cd166 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveTableKeysResponse; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; +import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; + +/** + * Handles OMSnapshotMoveTableKeysRequest Request. + * This is an OM internal request. Does not need @RequireSnapshotFeatureState. + */ +public class OMSnapshotMoveTableKeysRequest extends OMClientRequest { + + private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotMoveTableKeysRequest.class); + + public OMSnapshotMoveTableKeysRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); + SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); + SnapshotMoveTableKeysRequest moveTableKeysRequest = getOmRequest().getSnapshotMoveTableKeysRequest(); + SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, + snapshotChainManager, fromProtobuf(moveTableKeysRequest.getFromSnapshotID())); + String bucketKeyPrefix = omMetadataManager.getBucketKeyPrefix(fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName()); + String bucketKeyPrefixFSO = omMetadataManager.getBucketKeyPrefixFSO(fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName()); + + Set keys = new HashSet<>(); + List deletedKeys = new ArrayList<>(moveTableKeysRequest.getDeletedKeysList().size()); + + //validate deleted key starts with bucket prefix.[///] + for (SnapshotMoveKeyInfos deletedKey : moveTableKeysRequest.getDeletedKeysList()) { + // Filter only deleted keys with at least one keyInfo per key. 
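The loop continues below; condensed into a plain-JDK sketch, the validation applies three rules: drop entries with nothing to move, require the bucket prefix, and reject duplicates (plain strings stand in for SnapshotMoveKeyInfos here):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class MoveKeyValidationSketch {
  // Keep only non-empty keys under the bucket prefix; fail fast on duplicates.
  static List<String> validate(List<String> keys, String bucketPrefix) {
    Set<String> seen = new HashSet<>();
    List<String> accepted = new ArrayList<>();
    for (String key : keys) {
      if (key.isEmpty()) {
        continue;                               // nothing to move for this entry
      }
      if (!key.startsWith(bucketPrefix)) {
        throw new IllegalArgumentException(key + " is outside " + bucketPrefix);
      }
      if (!seen.add(key)) {                     // add() returns false for duplicates
        throw new IllegalArgumentException("duplicate key " + key);
      }
      accepted.add(key);
    }
    return accepted;
  }

  public static void main(String[] args) {
    System.out.println(validate(List.of("/vol/buck/a", "/vol/buck/b"), "/vol/buck/"));
  }
}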
+ if (!deletedKey.getKeyInfosList().isEmpty()) { + deletedKeys.add(deletedKey); + if (!deletedKey.getKey().startsWith(bucketKeyPrefix)) { + throw new OMException("Deleted Key: " + deletedKey + " doesn't start with prefix " + bucketKeyPrefix, + OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(deletedKey.getKey())) { + throw new OMException("Duplicate Deleted Key: " + deletedKey + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(deletedKey.getKey()); + } + } + } + + keys.clear(); + List renamedKeysList = new ArrayList<>(moveTableKeysRequest.getRenamedKeysList().size()); + //validate rename key starts with bucket prefix.[///] + for (HddsProtos.KeyValue renamedKey : moveTableKeysRequest.getRenamedKeysList()) { + if (renamedKey.hasKey() && renamedKey.hasValue()) { + renamedKeysList.add(renamedKey); + if (!renamedKey.getKey().startsWith(bucketKeyPrefix)) { + throw new OMException("Rename Key: " + renamedKey + " doesn't start with prefix " + bucketKeyPrefix, + OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(renamedKey.getKey())) { + throw new OMException("Duplicate rename Key: " + renamedKey + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(renamedKey.getKey()); + } + } + } + keys.clear(); + + // Filter only deleted dirs with only one keyInfo per key. + List deletedDirs = new ArrayList<>(moveTableKeysRequest.getDeletedDirsList().size()); + //validate deleted key starts with bucket FSO path prefix.[///] + for (SnapshotMoveKeyInfos deletedDir : moveTableKeysRequest.getDeletedDirsList()) { + // Filter deleted directories with exactly one keyInfo per key. + if (deletedDir.getKeyInfosList().size() == 1) { + deletedDirs.add(deletedDir); + if (!deletedDir.getKey().startsWith(bucketKeyPrefixFSO)) { + throw new OMException("Deleted dir: " + deletedDir + " doesn't start with prefix " + + bucketKeyPrefixFSO, OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(deletedDir.getKey())) { + throw new OMException("Duplicate deleted dir Key: " + deletedDir + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(deletedDir.getKey()); + } + } + } + return getOmRequest().toBuilder().setSnapshotMoveTableKeysRequest( + moveTableKeysRequest.toBuilder().clearDeletedDirs().clearDeletedKeys().clearRenamedKeys() + .addAllDeletedKeys(deletedKeys).addAllDeletedDirs(deletedDirs) + .addAllRenamedKeys(renamedKeysList).build()).build(); + } + + @Override + @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); + SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); + + SnapshotMoveTableKeysRequest moveTableKeysRequest = getOmRequest().getSnapshotMoveTableKeysRequest(); + + OMClientResponse omClientResponse; + OzoneManagerProtocolProtos.OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest()); + try { + SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, + snapshotChainManager, fromProtobuf(moveTableKeysRequest.getFromSnapshotID())); + // If there is no snapshot in the chain after the current snapshot move the keys to Active Object Store. + SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); + + // If next snapshot is not active then ignore move. 
Since this could be a redundant operation. + if (nextSnapshot != null && nextSnapshot.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { + throw new OMException("Next snapshot : " + nextSnapshot + " in chain is not active.", + OMException.ResultCodes.INVALID_SNAPSHOT_ERROR); + } + + // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. + fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshot)); + if (nextSnapshot != null) { + nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), nextSnapshot)); + } + omClientResponse = new OMSnapshotMoveTableKeysResponse(omResponse.build(), fromSnapshot, nextSnapshot, + moveTableKeysRequest.getDeletedKeysList(), moveTableKeysRequest.getDeletedDirsList(), + moveTableKeysRequest.getRenamedKeysList()); + } catch (IOException ex) { + omClientResponse = new OMSnapshotMoveTableKeysResponse(createErrorOMResponse(omResponse, ex)); + } + return omClientResponse; + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 2a9cfa6baf0..38c51d4de5c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -19,12 +19,13 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -54,6 +55,13 @@ public class OMSnapshotPurgeRequest extends OMClientRequest { private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotPurgeRequest.class); + /** + * This map contains up to date snapshotInfo and works as a local cache for OMSnapshotPurgeRequest. + * Since purge and other updates happen in sequence inside validateAndUpdateCache, we can get updated snapshotInfo + * from this map rather than getting from snapshotInfoTable which creates a deep copy for every get call.
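A small plain-JDK illustration of why such a per-request map helps: repeated lookups hand back the same mutable instance, so later steps observe earlier updates and no additional deep copies are made (expensiveGet below is a stand-in for a table get that copies on every call, not the Ozone API):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

final class LocalSnapshotCacheSketch {
  public static void main(String[] args) {
    // Stand-in for a table lookup that returns a fresh deep copy on every call.
    Function<String, StringBuilder> expensiveGet = key -> new StringBuilder("info-for-" + key);

    Map<String, StringBuilder> local = new HashMap<>();     // per-request cache, like updatedSnapshotInfos
    StringBuilder first = local.computeIfAbsent("/vol/buck/snap1", expensiveGet);
    first.append(" [updated]");                             // later steps see this mutation...

    StringBuilder again = local.computeIfAbsent("/vol/buck/snap1", expensiveGet);
    System.out.println(again);                              // ...because the same instance is returned
    System.out.println(first == again);                     // true: no second deep copy was made
  }
}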
+ */ + private final Map updatedSnapshotInfos = new HashMap<>(); + public OMSnapshotPurgeRequest(OMRequest omRequest) { super(omRequest); } @@ -64,7 +72,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn final long trxnLogIndex = termIndex.getIndex(); - OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = @@ -80,9 +87,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn try { List snapshotDbKeys = snapshotPurgeRequest .getSnapshotDBKeysList(); - Map updatedSnapInfos = new HashMap<>(); - Map updatedPathPreviousAndGlobalSnapshots = - new HashMap<>(); // Each snapshot purge operation does three things: // 1. Update the deep clean flag for the next active snapshot (So that it can be @@ -92,37 +96,36 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // There is no need to take lock for snapshot purge as of now. We can simply rely on OMStateMachine // because it executes transaction sequentially. for (String snapTableKey : snapshotDbKeys) { - SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable().get(snapTableKey); + SnapshotInfo fromSnapshot = getUpdatedSnapshotInfo(snapTableKey, omMetadataManager); if (fromSnapshot == null) { // Snapshot may have been purged in the previous iteration of SnapshotDeletingService. LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + "Snapshot purge request.", snapTableKey); continue; } - - SnapshotInfo nextSnapshot = - SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); + SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); // Step 1: Update the deep clean flag for the next active snapshot - updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex, updatedSnapInfos); + updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex); // Step 2: Update the snapshot chain. - updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex, - updatedPathPreviousAndGlobalSnapshots); - // Remove and close snapshot's RocksDB instance from SnapshotCache. - omSnapshotManager.invalidateCacheEntry(fromSnapshot.getSnapshotId()); - // Step 3: Purge the snapshot from SnapshotInfoTable cache. + updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex); + // Step 3: Purge the snapshot from SnapshotInfoTable cache and also remove from the map. omMetadataManager.getSnapshotInfoTable() .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); + updatedSnapshotInfos.remove(fromSnapshot.getTableKey()); + } + // Update the snapshotInfo lastTransactionInfo. 
+ for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) { + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()), + CacheValue.get(termIndex.getIndex(), snapshotInfo)); } - omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), - snapshotDbKeys, updatedSnapInfos, - updatedPathPreviousAndGlobalSnapshots); + omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos); omMetrics.incNumSnapshotPurges(); - LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating deep clean flags for " + - "snapshots: {} and global and previous for snapshots:{}.", - snapshotPurgeRequest, updatedSnapInfos.keySet(), updatedPathPreviousAndGlobalSnapshots.keySet()); + LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating snapshots:{}.", + snapshotPurgeRequest, updatedSnapshotInfos); } catch (IOException ex) { omClientResponse = new OMSnapshotPurgeResponse( createErrorOMResponse(omResponse, ex)); @@ -133,9 +136,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn return omClientResponse; } - private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, - OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, - Map updatedSnapInfos) throws IOException { + private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, + long trxnLogIndex) throws IOException { if (snapInfo != null) { // Setting next snapshot deep clean to false, Since the // current snapshot is deleted. We can potentially @@ -145,7 +147,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, // Update table cache first omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapInfo.getTableKey()), CacheValue.get(trxnLogIndex, snapInfo)); - updatedSnapInfos.put(snapInfo.getTableKey(), snapInfo); + updatedSnapshotInfos.put(snapInfo.getTableKey(), snapInfo); } } @@ -158,8 +160,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, private void updateSnapshotChainAndCache( OmMetadataManagerImpl metadataManager, SnapshotInfo snapInfo, - long trxnLogIndex, - Map updatedPathPreviousAndGlobalSnapshots + long trxnLogIndex ) throws IOException { if (snapInfo == null) { return; @@ -198,43 +199,36 @@ private void updateSnapshotChainAndCache( } SnapshotInfo nextPathSnapInfo = - nextPathSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextPathSnapshotKey) : null; - - SnapshotInfo nextGlobalSnapInfo = - nextGlobalSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextGlobalSnapshotKey) : null; + nextPathSnapshotKey != null ? getUpdatedSnapshotInfo(nextPathSnapshotKey, metadataManager) : null; - // Updates next path snapshot's previous snapshot ID if (nextPathSnapInfo != null) { nextPathSnapInfo.setPathPreviousSnapshotId(snapInfo.getPathPreviousSnapshotId()); metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(nextPathSnapInfo.getTableKey()), CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); } - // Updates next global snapshot's previous snapshot ID - // If both next global and path snapshot are same, it may overwrite - // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check - // will prevent it. 
- if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && - nextGlobalSnapInfo.getSnapshotId().equals(nextPathSnapInfo.getSnapshotId())) { - nextPathSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } else if (nextGlobalSnapInfo != null) { - nextGlobalSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); + SnapshotInfo nextGlobalSnapInfo = + nextGlobalSnapshotKey != null ? getUpdatedSnapshotInfo(nextGlobalSnapshotKey, metadataManager) : null; + + if (nextGlobalSnapInfo != null) { + nextGlobalSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(nextGlobalSnapInfo.getTableKey()), CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); } snapshotChainManager.deleteSnapshot(snapInfo); } + + private SnapshotInfo getUpdatedSnapshotInfo(String snapshotTableKey, OMMetadataManager omMetadataManager) + throws IOException { + SnapshotInfo snapshotInfo = updatedSnapshotInfos.get(snapshotTableKey); + + if (snapshotInfo == null) { + snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + updatedSnapshotInfos.put(snapshotTableKey, snapshotInfo); + } + return snapshotInfo; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java index 8341f875504..8cf0579647c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -25,6 +25,8 @@ import java.io.IOException; import java.nio.file.InvalidPathException; + +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -32,6 +34,7 @@ import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; @@ -75,6 +78,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { String volumeName = renameSnapshotRequest.getVolumeName(); String bucketName = renameSnapshotRequest.getBucketName(); + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
+ ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this); + volumeName = resolvedBucket.realVolume(); + bucketName = resolvedBucket.realBucket(); // Permission check UserGroupInformation ugi = createUGIForApi(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index 01dbb5ba1e0..a22775107b9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -20,11 +20,15 @@ import java.io.IOException; import java.nio.file.InvalidPathException; +import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -55,6 +59,7 @@ .VolumeInfo; import org.apache.hadoop.util.Time; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK; @@ -160,6 +165,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn volumeList = omMetadataManager.getUserTable().get(dbUserKey); volumeList = addVolumeToOwnerList(volumeList, volume, owner, ozoneManager.getMaxUserVolumeCount(), transactionLogIndex); + + // Add default ACL for volume + List listOfAcls = getDefaultAclList(UserGroupInformation.createRemoteUser(owner), + ozoneManager.getConfiguration()); + // ACLs from VolumeArgs + if (omVolumeArgs.getAcls() != null) { + listOfAcls.addAll(omVolumeArgs.getAcls()); + } + // Remove the duplicates + listOfAcls = listOfAcls.stream().distinct().collect(Collectors.toList()); + omVolumeArgs.setAcls(listOfAcls); + createVolume(omMetadataManager, omVolumeArgs, volumeList, dbVolumeKey, dbUserKey, transactionLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java index 904b082e2d4..df74edfb1c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java @@ -129,7 +129,6 @@ protected static PersistedUserVolumeInfo addVolumeToOwnerList( * @param dbVolumeKey * @param dbUserKey * @param transactionLogIndex - * @throws IOException */ protected static void createVolume( final OMMetadataManager omMetadataManager, OmVolumeArgs omVolumeArgs, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index edb13f8cf98..782063d3244 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -48,12 +48,13 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; /** * Response for {@link OMDirectoriesPurgeRequestWithFSO} request. */ @CleanupTableInfo(cleanupTables = {DELETED_TABLE, DELETED_DIR_TABLE, - DIRECTORY_TABLE, FILE_TABLE}) + DIRECTORY_TABLE, FILE_TABLE, SNAPSHOT_INFO_TABLE}) public class OMDirectoriesPurgeResponseWithFSO extends OmKeyResponse { private static final Logger LOG = LoggerFactory.getLogger(OMDirectoriesPurgeResponseWithFSO.class); @@ -77,6 +78,10 @@ public OMDirectoriesPurgeResponseWithFSO(@Nonnull OMResponse omResponse, this.openKeyInfoMap = openKeyInfoMap; } + public OMDirectoriesPurgeResponseWithFSO(OMResponse omResponse) { + super(omResponse); + } + @Override public void addToDBBatch(OMMetadataManager metadataManager, BatchOperation batchOp) throws IOException { @@ -86,10 +91,7 @@ public void addToDBBatch(OMMetadataManager metadataManager, .getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted - rcFromSnapshotInfo = omSnapshotManager.getSnapshot( - fromSnapshotInfo.getVolumeName(), - fromSnapshotInfo.getBucketName(), - fromSnapshotInfo.getName())) { + rcFromSnapshotInfo = omSnapshotManager.getSnapshot(fromSnapshotInfo.getSnapshotId())) { OmSnapshot fromSnapshot = rcFromSnapshotInfo.get(); DBStore fromSnapshotStore = fromSnapshot.getMetadataManager() .getStore(); @@ -100,6 +102,7 @@ public void addToDBBatch(OMMetadataManager metadataManager, fromSnapshotStore.commitBatchOperation(writeBatch); } } + metadataManager.getSnapshotInfoTable().putWithBatch(batchOp, fromSnapshotInfo.getTableKey(), fromSnapshotInfo); } else { processPaths(metadataManager, batchOp); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index b16ba95d78f..cd2f7d190f4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -39,12 +39,13 @@ import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; import static org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveDeletedKeysResponse.createRepeatedOmKeyInfo; /** * Response for {@link OMKeyPurgeRequest} request. 
*/ -@CleanupTableInfo(cleanupTables = {DELETED_TABLE}) +@CleanupTableInfo(cleanupTables = {DELETED_TABLE, SNAPSHOT_INFO_TABLE}) public class OMKeyPurgeResponse extends OmKeyResponse { private List purgeKeyList; private SnapshotInfo fromSnapshot; @@ -75,18 +76,13 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, if (fromSnapshot != null) { OmSnapshotManager omSnapshotManager = - ((OmMetadataManagerImpl) omMetadataManager) - .getOzoneManager().getOmSnapshotManager(); + ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.getSnapshot( - fromSnapshot.getVolumeName(), - fromSnapshot.getBucketName(), - fromSnapshot.getName())) { + omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); - DBStore fromSnapshotStore = - fromOmSnapshot.getMetadataManager().getStore(); + DBStore fromSnapshotStore = fromOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. try (BatchOperation writeBatch = fromSnapshotStore.initBatchOperation()) { @@ -95,6 +91,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, fromSnapshotStore.commitBatchOperation(writeBatch); } } + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); } else { processKeys(batchOperation, omMetadataManager); processKeysToUpdate(batchOperation, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java new file mode 100644 index 00000000000..10181c9468f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.key.OmKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for delete object tagging request. + */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE}) +public class S3DeleteObjectTaggingResponse extends OmKeyResponse { + + private OmKeyInfo omKeyInfo; + + public S3DeleteObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo) { + super(omResponse); + this.omKeyInfo = omKeyInfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public S3DeleteObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + omMetadataManager.getKeyTable(getBucketLayout()).putWithBatch(batchOperation, + omMetadataManager.getOzoneKey( + omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), + omKeyInfo.getKeyName()), + omKeyInfo + ); + } + + protected OmKeyInfo getOmKeyInfo() { + return omKeyInfo; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..bb42668ad05 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; + +/** + * Response for delete object tagging request for FSO bucket. + */ +@CleanupTableInfo(cleanupTables = {FILE_TABLE}) +public class S3DeleteObjectTaggingResponseWithFSO extends S3DeleteObjectTaggingResponse { + + private long volumeId; + private long bucketId; + + public S3DeleteObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, + @Nonnull long volumeId, + @Nonnull long bucketId) { + super(omResponse, omKeyInfo); + this.volumeId = volumeId; + this.bucketId = bucketId; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public S3DeleteObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName()); + omMetadataManager.getKeyTable(getBucketLayout()) + .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java new file mode 100644 index 00000000000..2acefe2ec6e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.key.OmKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for put object tagging request. + */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE}) +public class S3PutObjectTaggingResponse extends OmKeyResponse { + + private OmKeyInfo omKeyInfo; + + public S3PutObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyinfo) { + super(omResponse); + this.omKeyInfo = omKeyinfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public S3PutObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + omMetadataManager.getKeyTable(getBucketLayout()).putWithBatch(batchOperation, + omMetadataManager.getOzoneKey( + omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), + omKeyInfo.getKeyName()), + omKeyInfo + ); + } + + protected OmKeyInfo getOmKeyInfo() { + return omKeyInfo; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..6152fbabe89 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; + +/** + * Response for put object tagging request for FSO bucket. + */ +@CleanupTableInfo(cleanupTables = {FILE_TABLE}) +public class S3PutObjectTaggingResponseWithFSO extends S3PutObjectTaggingResponse { + + private long volumeId; + private long bucketId; + + public S3PutObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, + @Nonnull long volumeId, + @Nonnull long bucketId) { + super(omResponse, omKeyInfo); + this.volumeId = volumeId; + this.bucketId = bucketId; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public S3PutObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName()); + omMetadataManager.getKeyTable(getBucketLayout()) + .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java new file mode 100644 index 00000000000..9a104c4663a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +/** + * Package contains classes related to S3 tagging responses. + */ +package org.apache.hadoop.ozone.om.response.s3.tagging; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index 3726faacfd7..7d1b7f237b2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -40,6 +40,7 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.createMergedRepeatedOmKeyInfoFromDeletedTableEntry; /** * Response for OMSnapshotMoveDeletedKeysRequest. @@ -91,19 +92,13 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, .getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.getSnapshot( - fromSnapshot.getVolumeName(), - fromSnapshot.getBucketName(), - fromSnapshot.getName())) { + omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); if (nextSnapshot != null) { try (ReferenceCounted - rcOmNextSnapshot = omSnapshotManager.getSnapshot( - nextSnapshot.getVolumeName(), - nextSnapshot.getBucketName(), - nextSnapshot.getName())) { + rcOmNextSnapshot = omSnapshotManager.getSnapshot(nextSnapshot.getSnapshotId())) { OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); RDBStore nextSnapshotStore = @@ -139,6 +134,11 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, } } + // Flush snapshot info to rocksDB. + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); + if (nextSnapshot != null) { + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, nextSnapshot.getTableKey(), nextSnapshot); + } } private void deleteDirsFromSnapshot(BatchOperation batchOp, @@ -200,8 +200,7 @@ private void processKeys(BatchOperation batchOp, } for (SnapshotMoveKeyInfos dBKey : nextDBKeysList) { - RepeatedOmKeyInfo omKeyInfos = - createRepeatedOmKeyInfo(dBKey, metadataManager); + RepeatedOmKeyInfo omKeyInfos = createMergedRepeatedOmKeyInfoFromDeletedTableEntry(dBKey, metadataManager); if (omKeyInfos == null) { continue; } @@ -224,36 +223,5 @@ public static RepeatedOmKeyInfo createRepeatedOmKeyInfo( return result; } - - private RepeatedOmKeyInfo createRepeatedOmKeyInfo( - SnapshotMoveKeyInfos snapshotMoveKeyInfos, - OMMetadataManager metadataManager) throws IOException { - String dbKey = snapshotMoveKeyInfos.getKey(); - List keyInfoList = snapshotMoveKeyInfos.getKeyInfosList(); - // When older version of keys are moved to the next snapshot's deletedTable - // The newer version might also be in the next snapshot's deletedTable and - // it might overwrite. This is to avoid that and also avoid having - // orphans blocks. 
- RepeatedOmKeyInfo result = metadataManager.getDeletedTable().get(dbKey); - - for (KeyInfo keyInfo : keyInfoList) { - OmKeyInfo omKeyInfo = OmKeyInfo.getFromProtobuf(keyInfo); - if (result == null) { - result = new RepeatedOmKeyInfo(omKeyInfo); - } else if (!isSameAsLatestOmKeyInfo(omKeyInfo, result)) { - result.addOmKeyInfo(omKeyInfo); - } - } - - return result; - } - - private boolean isSameAsLatestOmKeyInfo(OmKeyInfo omKeyInfo, - RepeatedOmKeyInfo result) { - int size = result.getOmKeyInfoList().size(); - assert size > 0; - OmKeyInfo keyInfoFromRepeated = result.getOmKeyInfoList().get(size - 1); - return omKeyInfo.equals(keyInfoFromRepeated); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java new file mode 100644 index 00000000000..b06570afb14 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.om.response.snapshot; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.createMergedRepeatedOmKeyInfoFromDeletedTableEntry; + +/** + * Response for OMSnapshotMoveDeletedKeysRequest. 
+ */ +@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE}) +public class OMSnapshotMoveTableKeysResponse extends OMClientResponse { + + private SnapshotInfo fromSnapshot; + private SnapshotInfo nextSnapshot; + private List deletedKeys; + private List renameKeysList; + private List deletedDirs; + + public OMSnapshotMoveTableKeysResponse(OMResponse omResponse, + @Nonnull SnapshotInfo fromSnapshot, SnapshotInfo nextSnapshot, + List deletedKeys, + List deletedDirs, + List renamedKeys) { + super(omResponse); + this.fromSnapshot = fromSnapshot; + this.nextSnapshot = nextSnapshot; + this.deletedKeys = deletedKeys; + this.renameKeysList = renamedKeys; + this.deletedDirs = deletedDirs; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public OMSnapshotMoveTableKeysResponse(@Nonnull OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + OmSnapshotManager omSnapshotManager = ((OmMetadataManagerImpl) omMetadataManager) + .getOzoneManager().getOmSnapshotManager(); + + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { + + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); + + if (nextSnapshot != null) { + try (ReferenceCounted + rcOmNextSnapshot = omSnapshotManager.getSnapshot(nextSnapshot.getSnapshotId())) { + + OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); + RDBStore nextSnapshotStore = (RDBStore) nextOmSnapshot.getMetadataManager().getStore(); + // Init Batch Operation for snapshot db. + try (BatchOperation writeBatch = nextSnapshotStore.initBatchOperation()) { + addKeysToNextSnapshot(writeBatch, nextOmSnapshot.getMetadataManager()); + nextSnapshotStore.commitBatchOperation(writeBatch); + nextSnapshotStore.getDb().flushWal(true); + nextSnapshotStore.getDb().flush(); + } + } + } else { + // Handle the case where there is no next Snapshot. + addKeysToNextSnapshot(batchOperation, omMetadataManager); + } + + // Update From Snapshot Deleted Table. + RDBStore fromSnapshotStore = (RDBStore) fromOmSnapshot.getMetadataManager().getStore(); + try (BatchOperation fromSnapshotBatchOp = fromSnapshotStore.initBatchOperation()) { + deleteKeysFromSnapshot(fromSnapshotBatchOp, fromOmSnapshot.getMetadataManager()); + fromSnapshotStore.commitBatchOperation(fromSnapshotBatchOp); + fromSnapshotStore.getDb().flushWal(true); + fromSnapshotStore.getDb().flush(); + } + } + + // Flush snapshot info to rocksDB. + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); + if (nextSnapshot != null) { + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, nextSnapshot.getTableKey(), nextSnapshot); + } + } + + private void deleteKeysFromSnapshot(BatchOperation batchOp, OMMetadataManager fromSnapshotMetadataManager) + throws IOException { + for (SnapshotMoveKeyInfos deletedOmKeyInfo : deletedKeys) { + // Delete keys from current snapshot that are moved to next snapshot. + fromSnapshotMetadataManager.getDeletedTable().deleteWithBatch(batchOp, deletedOmKeyInfo.getKey()); + } + + // Delete rename keys from current snapshot that are moved to next snapshot. 
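The addToDBBatch flow above writes the moved entries into the next snapshot's own store with a dedicated batch, commits it, and then calls flushWal(true) and flush() before the OM-level batch that rewrites the SnapshotInfo rows is applied. Below is a rough analogue of that commit-then-flush sequence at the raw RocksDB level; the database path and keys are made up for illustration only:

    import java.nio.charset.StandardCharsets;
    import org.rocksdb.FlushOptions;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.WriteBatch;
    import org.rocksdb.WriteOptions;

    public final class SnapshotStoreFlushSketch {
      public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options opts = new Options().setCreateIfMissing(true);
             // Stand-in for the next snapshot's RDBStore; path is illustrative.
             RocksDB sideDb = RocksDB.open(opts, "/tmp/next-snapshot-db");
             WriteBatch batch = new WriteBatch();
             WriteOptions writeOpts = new WriteOptions();
             FlushOptions flushOpts = new FlushOptions().setWaitForFlush(true)) {
          // Stage all moved entries in one batch so the side DB is updated atomically.
          batch.put("deletedKey-1".getBytes(StandardCharsets.UTF_8),
                    "keyInfo".getBytes(StandardCharsets.UTF_8));
          sideDb.write(writeOpts, batch);
          // Make the batch durable before the caller proceeds with its own batch.
          sideDb.flushWal(true);
          sideDb.flush(flushOpts);
        }
      }
    }

The patch performs the same flushWal/flush pair on each snapshot's RDBStore so the moved keys are persisted before the snapshot-info rows are rewritten in the main OM batch.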
+ for (HddsProtos.KeyValue renameEntry : renameKeysList) { + fromSnapshotMetadataManager.getSnapshotRenamedTable().deleteWithBatch(batchOp, renameEntry.getKey()); + } + + // Delete deletedDir from current snapshot that are moved to next snapshot. + for (SnapshotMoveKeyInfos deletedDirInfo : deletedDirs) { + fromSnapshotMetadataManager.getDeletedDirTable().deleteWithBatch(batchOp, deletedDirInfo.getKey()); + } + + } + + private void addKeysToNextSnapshot(BatchOperation batchOp, OMMetadataManager metadataManager) throws IOException { + + // Add renamed keys to the next snapshot or active DB. + for (HddsProtos.KeyValue renameEntry : renameKeysList) { + metadataManager.getSnapshotRenamedTable().putWithBatch(batchOp, renameEntry.getKey(), renameEntry.getValue()); + } + // Add deleted keys to the next snapshot or active DB. + for (SnapshotMoveKeyInfos deletedKeyInfo : deletedKeys) { + RepeatedOmKeyInfo omKeyInfos = createMergedRepeatedOmKeyInfoFromDeletedTableEntry(deletedKeyInfo, + metadataManager); + metadataManager.getDeletedTable().putWithBatch(batchOp, deletedKeyInfo.getKey(), omKeyInfos); + } + // Add deleted dir keys to the next snapshot or active DB. + for (SnapshotMoveKeyInfos deletedDirInfo : deletedDirs) { + metadataManager.getDeletedDirTable().putWithBatch(batchOp, deletedDirInfo.getKey(), + OmKeyInfo.getFromProtobuf(deletedDirInfo.getKeyInfosList().get(0))); + } + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index ea9e68cc9ad..81a020653f7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -49,18 +49,15 @@ public class OMSnapshotPurgeResponse extends OMClientResponse { LoggerFactory.getLogger(OMSnapshotPurgeResponse.class); private final List snapshotDbKeys; private final Map updatedSnapInfos; - private final Map updatedPreviousAndGlobalSnapInfos; public OMSnapshotPurgeResponse( @Nonnull OMResponse omResponse, @Nonnull List snapshotDbKeys, - Map updatedSnapInfos, - Map updatedPreviousAndGlobalSnapInfos + Map updatedSnapInfos ) { super(omResponse); this.snapshotDbKeys = snapshotDbKeys; this.updatedSnapInfos = updatedSnapInfos; - this.updatedPreviousAndGlobalSnapInfos = updatedPreviousAndGlobalSnapInfos; } /** @@ -72,7 +69,6 @@ public OMSnapshotPurgeResponse(@Nonnull OMResponse omResponse) { checkStatusNotOK(); this.snapshotDbKeys = null; this.updatedSnapInfos = null; - this.updatedPreviousAndGlobalSnapInfos = null; } @Override @@ -82,8 +78,6 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) omMetadataManager; updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); - updateSnapInfo(metadataManager, batchOperation, - updatedPreviousAndGlobalSnapInfos); for (String dbKey: snapshotDbKeys) { // Skip the cache here because snapshot is purged from cache in OMSnapshotPurgeRequest. SnapshotInfo snapshotInfo = omMetadataManager @@ -96,8 +90,15 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, continue; } + // Remove and close snapshot's RocksDB instance from SnapshotCache. 
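The purge path here removes and closes the snapshot's RocksDB handle from the SnapshotCache and drops its id from the snapshotId-to-tableKey map before the snapshotInfo row itself is deleted (see the code that follows). A small illustrative sketch of that bookkeeping, with generic Closeable/Map stand-ins rather than the real OmSnapshotManager and SnapshotChainManager types:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Map;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch of the purge-side bookkeeping: evicting a snapshot closes its open DB handle
    // and drops its id from the id -> table-key map before the row itself is removed.
    final class SnapshotCacheSketch {
      private final Map<UUID, Closeable> openSnapshotDbs = new ConcurrentHashMap<>();
      private final Map<UUID, String> snapshotIdToTableKey = new ConcurrentHashMap<>();

      void invalidateCacheEntry(UUID snapshotId) throws IOException {
        Closeable db = openSnapshotDbs.remove(snapshotId);
        if (db != null) {
          db.close();  // release the handle held for this snapshot
        }
        snapshotIdToTableKey.remove(snapshotId);
      }
    }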
+ ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager() + .invalidateCacheEntry(snapshotInfo.getSnapshotId()); + // Remove the snapshot from snapshotId to snapshotTableKey map. + ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager() + .removeFromSnapshotIdToTable(snapshotInfo.getSnapshotId()); // Delete Snapshot checkpoint directory. deleteCheckpointDirectory(omMetadataManager, snapshotInfo); + // Delete snapshotInfo from the table. omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 429e286287c..e60180938ff 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -19,6 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundService; @@ -30,15 +31,14 @@ import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -48,8 +48,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.util.Preconditions; import java.io.IOException; @@ -75,7 +73,7 @@ public abstract class AbstractKeyDeletingService extends BackgroundService private final OzoneManager ozoneManager; private final ScmBlockLocationProtocol scmClient; - private static ClientId clientId = ClientId.randomId(); + private final ClientId clientId = ClientId.randomId(); private final AtomicLong deletedDirsCount; private final AtomicLong movedDirsCount; private final AtomicLong movedFilesCount; @@ -99,7 +97,7 @@ public AbstractKeyDeletingService(String serviceName, long interval, protected int processKeyDeletes(List keyBlocksList, KeyManager manager, HashMap keysToModify, - String snapTableKey) throws IOException { + String snapTableKey, UUID expectedPreviousSnapshotId) 
throws IOException { long startTime = Time.monotonicNow(); int delCount = 0; @@ -122,15 +120,17 @@ protected int processKeyDeletes(List keyBlocksList, startTime = Time.monotonicNow(); if (isRatisEnabled()) { delCount = submitPurgeKeysRequest(blockDeletionResults, - keysToModify, snapTableKey); + keysToModify, snapTableKey, expectedPreviousSnapshotId); } else { // TODO: Once HA and non-HA paths are merged, we should have // only one code path here. Purge keys should go through an // OMRequest model. delCount = deleteAllKeys(blockDeletionResults, manager); } - LOG.info("Blocks for {} (out of {}) keys are deleted from DB in {} ms", - delCount, blockDeletionResults.size(), Time.monotonicNow() - startTime); + int limit = ozoneManager.getConfiguration().getInt(OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK, + OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT); + LOG.info("Blocks for {} (out of {}) keys are deleted from DB in {} ms. Limit per task is {}.", + delCount, blockDeletionResults.size(), Time.monotonicNow() - startTime, limit); } return delCount; } @@ -174,7 +174,7 @@ private int deleteAllKeys(List results, * @param keysToModify Updated list of RepeatedOmKeyInfo */ private int submitPurgeKeysRequest(List results, - HashMap keysToModify, String snapTableKey) { + HashMap keysToModify, String snapTableKey, UUID expectedPreviousSnapshotId) { Map, List> purgeKeysMapPerBucket = new HashMap<>(); @@ -205,6 +205,12 @@ private int submitPurgeKeysRequest(List results, if (snapTableKey != null) { purgeKeysRequest.setSnapshotTableKey(snapTableKey); } + OzoneManagerProtocolProtos.NullableUUID.Builder expectedPreviousSnapshotNullableUUID = + OzoneManagerProtocolProtos.NullableUUID.newBuilder(); + if (expectedPreviousSnapshotId != null) { + expectedPreviousSnapshotNullableUUID.setUuid(HddsUtils.toProtobuf(expectedPreviousSnapshotId)); + } + purgeKeysRequest.setExpectedPreviousSnapshotID(expectedPreviousSnapshotNullableUUID.build()); // Add keys to PurgeKeysRequest bucket wise. for (Map.Entry, List> entry : @@ -247,32 +253,15 @@ private int submitPurgeKeysRequest(List results, // Submit PurgeKeys request to OM try { - RaftClientRequest raftClientRequest = - createRaftClientRequestForPurge(omRequest); - ozoneManager.getOmRatisServer().submitRequest(omRequest, - raftClientRequest); + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); } catch (ServiceException e) { - LOG.error("PurgeKey request failed. Will retry at next run."); + LOG.error("PurgeKey request failed. Will retry at next run.", e); return 0; } return deletedCount; } - protected RaftClientRequest createRaftClientRequestForPurge( - OMRequest omRequest) { - return RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) - .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) - .setCallId(runCount.get()) - .setMessage( - Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - } - /** * Parse Volume and Bucket Name from ObjectKey and add it to given map of * keys to be purged per bucket. 
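Both purge-key and purge-directory requests now carry an expectedPreviousSnapshotID wrapped in a NullableUUID message, so "no previous snapshot" is distinguishable from "field not sent". A simplified, self-contained sketch of that wrapper idea, with plain Java types standing in for the generated protobuf builder:

    import java.util.Optional;
    import java.util.UUID;

    // Stand-in for the NullableUUID wrapper: the wrapper is always attached to the
    // request, and "no previous snapshot" is expressed by leaving the inner value unset.
    final class NullableUuid {
      private final UUID value;  // may be null

      private NullableUuid(UUID value) {
        this.value = value;
      }

      static NullableUuid of(UUID value) {
        return new NullableUuid(value);
      }

      static NullableUuid unset() {
        return new NullableUuid(null);
      }

      Optional<UUID> get() {
        return Optional.ofNullable(value);
      }
    }

    final class PurgeRequestSketch {
      // Mirrors the builder logic in the patch: set the uuid only when one exists,
      // otherwise send an empty wrapper so the server can tell "none" from "missing".
      static NullableUuid expectedPreviousSnapshot(UUID expectedPreviousSnapshotId) {
        return expectedPreviousSnapshotId != null
            ? NullableUuid.of(expectedPreviousSnapshotId)
            : NullableUuid.unset();
      }
    }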
@@ -293,13 +282,21 @@ private void addToMap(Map, List> map, String object } protected void submitPurgePaths(List requests, - String snapTableKey) { + String snapTableKey, + UUID expectedPreviousSnapshotId, long rnCnt) { OzoneManagerProtocolProtos.PurgeDirectoriesRequest.Builder purgeDirRequest = OzoneManagerProtocolProtos.PurgeDirectoriesRequest.newBuilder(); if (snapTableKey != null) { purgeDirRequest.setSnapshotTableKey(snapTableKey); } + OzoneManagerProtocolProtos.NullableUUID.Builder expectedPreviousSnapshotNullableUUID = + OzoneManagerProtocolProtos.NullableUUID.newBuilder(); + if (expectedPreviousSnapshotId != null) { + expectedPreviousSnapshotNullableUUID.setUuid(HddsUtils.toProtobuf(expectedPreviousSnapshotId)); + } + purgeDirRequest.setExpectedPreviousSnapshotID(expectedPreviousSnapshotNullableUUID.build()); + purgeDirRequest.addAllDeletedPath(requests); OzoneManagerProtocolProtos.OMRequest omRequest = @@ -311,17 +308,9 @@ protected void submitPurgePaths(List requests, // Submit Purge paths request to OM try { - if (isRatisEnabled()) { - RaftClientRequest raftClientRequest = - createRaftClientRequestForPurge(omRequest); - ozoneManager.getOmRatisServer().submitRequest(omRequest, - raftClientRequest); - } else { - getOzoneManager().getOmServerProtocol() - .submitRequest(null, omRequest); - } + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, rnCnt); } catch (ServiceException e) { - LOG.error("PurgePaths request failed. Will retry at next run."); + LOG.error("PurgePaths request failed. Will retry at next run.", e); } } @@ -413,8 +402,10 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, List> allSubDirList, List purgePathRequestList, String snapTableKey, long startTime, - int remainingBufLimit, KeyManager keyManager) { + int remainingBufLimit, KeyManager keyManager, + UUID expectedPreviousSnapshotId, long rnCnt) { + long limit = remainNum; // Optimization to handle delete sub-dir and keys to remove quickly // This case will be useful to handle when depth of directory is high int subdirDelNum = 0; @@ -435,6 +426,8 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, } consumedSize += request.getSerializedSize(); purgePathRequestList.add(request); + // reduce remain count for self, sub-files, and sub-directories + remainNum = remainNum - 1; remainNum = remainNum - request.getDeletedSubFilesCount(); remainNum = remainNum - request.getMarkDeletedSubDirsCount(); // Count up the purgeDeletedDir, subDirs and subFiles @@ -453,20 +446,21 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, } if (!purgePathRequestList.isEmpty()) { - submitPurgePaths(purgePathRequestList, snapTableKey); + submitPurgePaths(purgePathRequestList, snapTableKey, expectedPreviousSnapshotId, rnCnt); } if (dirNum != 0 || subDirNum != 0 || subFileNum != 0) { deletedDirsCount.addAndGet(dirNum + subdirDelNum); movedDirsCount.addAndGet(subDirNum - subdirDelNum); movedFilesCount.addAndGet(subFileNum); + long timeTakenInIteration = Time.monotonicNow() - startTime; LOG.info("Number of dirs deleted: {}, Number of sub-dir " + "deleted: {}, Number of sub-files moved:" + " {} to DeletedTable, Number of sub-dirs moved {} to " + - "DeletedDirectoryTable, iteration elapsed: {}ms," + + "DeletedDirectoryTable, limit per iteration: {}, iteration elapsed: {}ms, " + " totalRunCount: {}", - dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum), - Time.monotonicNow() - startTime, getRunCount()); + dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum), limit, + 
timeTakenInIteration, rnCnt); } return remainNum; } @@ -576,26 +570,6 @@ protected boolean isBufferLimitCrossed( return cLimit + increment >= maxLimit; } - protected SnapshotInfo getPreviousActiveSnapshot(SnapshotInfo snapInfo, - SnapshotChainManager chainManager, OmSnapshotManager omSnapshotManager) - throws IOException { - SnapshotInfo currSnapInfo = snapInfo; - while (chainManager.hasPreviousPathSnapshot( - currSnapInfo.getSnapshotPath(), currSnapInfo.getSnapshotId())) { - - UUID prevPathSnapshot = chainManager.previousPathSnapshot( - currSnapInfo.getSnapshotPath(), currSnapInfo.getSnapshotId()); - String tableKey = chainManager.getTableKey(prevPathSnapshot); - SnapshotInfo prevSnapInfo = omSnapshotManager.getSnapshotInfo(tableKey); - if (prevSnapInfo.getSnapshotStatus() == - SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { - return prevSnapInfo; - } - currSnapInfo = prevSnapInfo; - } - return null; - } - protected boolean isKeyReclaimable( Table previousKeyTable, Table renamedTable, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index c8703c3c4c6..05555439acf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -33,18 +34,23 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.util.Time; -import org.apache.ratis.protocol.ClientId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT; @@ -70,24 +76,27 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService { public static final Logger LOG = LoggerFactory.getLogger(DirectoryDeletingService.class); - private static ClientId clientId = ClientId.randomId(); - - // Use only a single thread for DirDeletion. Multiple threads would read - // or write to same tables and can send deletion requests for same key - // multiple times. - private static final int DIR_DELETING_CORE_POOL_SIZE = 1; + // Using multi thread for DirDeletion. 
Multiple threads would read + // from parent directory info from deleted directory table concurrently + // and send deletion requests. + private final int dirDeletingCorePoolSize; private static final int MIN_ERR_LIMIT_PER_TASK = 1000; // Number of items(dirs/files) to be batched in an iteration. private final long pathLimitPerTask; private final int ratisByteLimit; private final AtomicBoolean suspended; + private AtomicBoolean isRunningOnAOS; + + private final DeletedDirSupplier deletedDirSupplier; + + private AtomicInteger taskCount = new AtomicInteger(0); public DirectoryDeletingService(long interval, TimeUnit unit, long serviceTimeout, OzoneManager ozoneManager, - OzoneConfiguration configuration) { + OzoneConfiguration configuration, int dirDeletingServiceCorePoolSize) { super(DirectoryDeletingService.class.getSimpleName(), interval, unit, - DIR_DELETING_CORE_POOL_SIZE, serviceTimeout, ozoneManager, null); + dirDeletingServiceCorePoolSize, serviceTimeout, ozoneManager, null); this.pathLimitPerTask = configuration .getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK, OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT); @@ -98,6 +107,10 @@ public DirectoryDeletingService(long interval, TimeUnit unit, // always go to 90% of max limit for request as other header will be added this.ratisByteLimit = (int) (limit * 0.9); this.suspended = new AtomicBoolean(false); + this.isRunningOnAOS = new AtomicBoolean(false); + this.dirDeletingCorePoolSize = dirDeletingServiceCorePoolSize; + deletedDirSupplier = new DeletedDirSupplier(); + taskCount.set(0); } private boolean shouldRun() { @@ -108,6 +121,14 @@ private boolean shouldRun() { return getOzoneManager().isLeaderReady() && !suspended.get(); } + public boolean isRunningOnAOS() { + return isRunningOnAOS.get(); + } + + public AtomicInteger getTaskCount() { + return taskCount; + } + /** * Suspend the service. 
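DirectoryDeletingService switches from a single worker to a configurable pool, with all DirDeletingTask instances draining one shared iterator through the synchronized DeletedDirSupplier so no deleted-directory entry is handed to two tasks. A self-contained sketch of that supplier-plus-workers shape, using stand-in types rather than the real deletedDirTable iterator:

    import java.util.Iterator;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    // Several deletion workers drain one iterator; synchronizing next() ensures each
    // entry is processed by exactly one worker.
    public final class SharedIteratorSketch {
      static final class Supplier<T> {
        private final Iterator<T> it;

        Supplier(Iterator<T> it) {
          this.it = it;
        }

        synchronized T next() {
          return it.hasNext() ? it.next() : null;
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Supplier<String> supplier =
            new Supplier<>(List.of("dir1", "dir2", "dir3", "dir4").iterator());
        ExecutorService pool = Executors.newFixedThreadPool(2);  // analogous to dirDeletingCorePoolSize
        for (int i = 0; i < 2; i++) {
          pool.submit(() -> {
            for (String dir = supplier.next(); dir != null; dir = supplier.next()) {
              System.out.println(Thread.currentThread().getName() + " purging " + dir);
            }
          });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }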
*/ @@ -127,11 +148,61 @@ public void resume() { @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new DirectoryDeletingService.DirDeletingTask()); + if (taskCount.get() > 0) { + LOG.info("{} Directory deleting task(s) already in progress.", + taskCount.get()); + return queue; + } + try { + deletedDirSupplier.reInitItr(); + } catch (IOException ex) { + LOG.error("Unable to get the iterator.", ex); + return queue; + } + taskCount.set(dirDeletingCorePoolSize); + for (int i = 0; i < dirDeletingCorePoolSize; i++) { + queue.add(new DirectoryDeletingService.DirDeletingTask(this)); + } return queue; } - private class DirDeletingTask implements BackgroundTask { + @Override + public void shutdown() { + super.shutdown(); + deletedDirSupplier.closeItr(); + } + + private final class DeletedDirSupplier { + private TableIterator> + deleteTableIterator; + + private synchronized Table.KeyValue get() + throws IOException { + if (deleteTableIterator.hasNext()) { + return deleteTableIterator.next(); + } + return null; + } + + private synchronized void closeItr() { + IOUtils.closeQuietly(deleteTableIterator); + deleteTableIterator = null; + } + + private synchronized void reInitItr() throws IOException { + closeItr(); + deleteTableIterator = + getOzoneManager().getMetadataManager().getDeletedDirTable() + .iterator(); + } + } + + private final class DirDeletingTask implements BackgroundTask { + private final DirectoryDeletingService directoryDeletingService; + + private DirDeletingTask(DirectoryDeletingService service) { + this.directoryDeletingService = service; + } @Override public int getPriority() { @@ -140,78 +211,94 @@ public int getPriority() { @Override public BackgroundTaskResult call() { - if (shouldRun()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Running DirectoryDeletingService"); - } - getRunCount().incrementAndGet(); - long dirNum = 0L; - long subDirNum = 0L; - long subFileNum = 0L; - long remainNum = pathLimitPerTask; - int consumedSize = 0; - List purgePathRequestList = new ArrayList<>(); - List> allSubDirList - = new ArrayList<>((int) remainNum); - - Table.KeyValue pendingDeletedDirInfo; - try (TableIterator> - deleteTableIterator = getOzoneManager().getMetadataManager(). - getDeletedDirTable().iterator()) { - - long startTime = Time.monotonicNow(); - while (remainNum > 0 && deleteTableIterator.hasNext()) { - pendingDeletedDirInfo = deleteTableIterator.next(); - // Do not reclaim if the directory is still being referenced by - // the previous snapshot. - if (previousSnapshotHasDir(pendingDeletedDirInfo)) { - continue; - } + try { + if (shouldRun()) { + isRunningOnAOS.set(true); + long rnCnt = getRunCount().incrementAndGet(); + if (LOG.isDebugEnabled()) { + LOG.debug("Running DirectoryDeletingService. {}", rnCnt); + } + long dirNum = 0L; + long subDirNum = 0L; + long subFileNum = 0L; + long remainNum = pathLimitPerTask; + int consumedSize = 0; + List purgePathRequestList = new ArrayList<>(); + List> allSubDirList = + new ArrayList<>((int) remainNum); + + Table.KeyValue pendingDeletedDirInfo; + // This is to avoid race condition b/w purge request and snapshot chain updation. For AOS taking the global + // snapshotId since AOS could process multiple buckets in one iteration. 
+ try { + UUID expectedPreviousSnapshotId = + ((OmMetadataManagerImpl) getOzoneManager().getMetadataManager()).getSnapshotChainManager() + .getLatestGlobalSnapshotId(); - PurgePathRequest request = prepareDeleteDirRequest( - remainNum, pendingDeletedDirInfo.getValue(), - pendingDeletedDirInfo.getKey(), allSubDirList, - getOzoneManager().getKeyManager()); - if (isBufferLimitCrossed(ratisByteLimit, consumedSize, - request.getSerializedSize())) { - if (purgePathRequestList.size() != 0) { - // if message buffer reaches max limit, avoid sending further - remainNum = 0; + long startTime = Time.monotonicNow(); + while (remainNum > 0) { + pendingDeletedDirInfo = getPendingDeletedDirInfo(); + if (pendingDeletedDirInfo == null) { break; } - // if directory itself is having a lot of keys / files, - // reduce capacity to minimum level - remainNum = MIN_ERR_LIMIT_PER_TASK; - request = prepareDeleteDirRequest( - remainNum, pendingDeletedDirInfo.getValue(), + // Do not reclaim if the directory is still being referenced by + // the previous snapshot. + if (previousSnapshotHasDir(pendingDeletedDirInfo)) { + continue; + } + + PurgePathRequest request = prepareDeleteDirRequest(remainNum, + pendingDeletedDirInfo.getValue(), pendingDeletedDirInfo.getKey(), allSubDirList, getOzoneManager().getKeyManager()); + if (isBufferLimitCrossed(ratisByteLimit, consumedSize, + request.getSerializedSize())) { + if (purgePathRequestList.size() != 0) { + // if message buffer reaches max limit, avoid sending further + remainNum = 0; + break; + } + // if directory itself is having a lot of keys / files, + // reduce capacity to minimum level + remainNum = MIN_ERR_LIMIT_PER_TASK; + request = prepareDeleteDirRequest(remainNum, + pendingDeletedDirInfo.getValue(), + pendingDeletedDirInfo.getKey(), allSubDirList, + getOzoneManager().getKeyManager()); + } + consumedSize += request.getSerializedSize(); + purgePathRequestList.add(request); + // reduce remain count for self, sub-files, and sub-directories + remainNum = remainNum - 1; + remainNum = remainNum - request.getDeletedSubFilesCount(); + remainNum = remainNum - request.getMarkDeletedSubDirsCount(); + // Count up the purgeDeletedDir, subDirs and subFiles + if (request.getDeletedDir() != null && !request.getDeletedDir() + .isEmpty()) { + dirNum++; + } + subDirNum += request.getMarkDeletedSubDirsCount(); + subFileNum += request.getDeletedSubFilesCount(); } - consumedSize += request.getSerializedSize(); - purgePathRequestList.add(request); - remainNum = remainNum - request.getDeletedSubFilesCount(); - remainNum = remainNum - request.getMarkDeletedSubDirsCount(); - // Count up the purgeDeletedDir, subDirs and subFiles - if (request.getDeletedDir() != null - && !request.getDeletedDir().isEmpty()) { - dirNum++; - } - subDirNum += request.getMarkDeletedSubDirsCount(); - subFileNum += request.getDeletedSubFilesCount(); - } - - optimizeDirDeletesAndSubmitRequest( - remainNum, dirNum, subDirNum, subFileNum, - allSubDirList, purgePathRequestList, null, startTime, - ratisByteLimit - consumedSize, - getOzoneManager().getKeyManager()); + optimizeDirDeletesAndSubmitRequest(remainNum, dirNum, subDirNum, + subFileNum, allSubDirList, purgePathRequestList, null, + startTime, ratisByteLimit - consumedSize, + getOzoneManager().getKeyManager(), expectedPreviousSnapshotId, + rnCnt); - } catch (IOException e) { - LOG.error("Error while running delete directories and files " + - "background task. 
Will retry at next run.", e); + } catch (IOException e) { + LOG.error( + "Error while running delete directories and files " + "background task. Will retry at next run.", + e); + } + isRunningOnAOS.set(false); + synchronized (directoryDeletingService) { + this.directoryDeletingService.notify(); + } } + } finally { + taskCount.getAndDecrement(); } - // place holder by returning empty results of this call back. return BackgroundTaskResult.EmptyTaskResult.newResult(); } @@ -224,12 +311,23 @@ private boolean previousSnapshotHasDir( getOzoneManager().getOmSnapshotManager(); OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) getOzoneManager().getMetadataManager(); - + SnapshotInfo previousSnapshotInfo = SnapshotUtils.getLatestSnapshotInfo(deletedDirInfo.getVolumeName(), + deletedDirInfo.getBucketName(), getOzoneManager(), metadataManager.getSnapshotChainManager()); + if (previousSnapshotInfo == null) { + return false; + } + // previous snapshot is not active or it has not been flushed to disk then don't process the key in this + // iteration. + if (previousSnapshotInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), + previousSnapshotInfo)) { + return true; + } try (ReferenceCounted rcLatestSnapshot = - metadataManager.getLatestActiveSnapshot( + omSnapshotManager.getSnapshot( deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), - omSnapshotManager)) { + previousSnapshotInfo.getName())) { if (rcLatestSnapshot != null) { String dbRenameKey = metadataManager @@ -250,8 +348,14 @@ private boolean previousSnapshotHasDir( String prevDbKey = prevDirTableDBKey == null ? metadataManager.getOzoneDeletePathDirKey(key) : prevDirTableDBKey; OmDirectoryInfo prevDirInfo = prevDirTable.get(prevDbKey); - return prevDirInfo != null && - prevDirInfo.getObjectID() == deletedDirInfo.getObjectID(); + //Checking if the previous snapshot in the chain hasn't changed while checking if the deleted directory is + // present in the previous snapshot. If the chain has changed, the deleted directory could have been moved + // to the newly created snapshot. 
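previousSnapshotHasDir now re-resolves the latest path snapshot after the directory lookup and treats the entry as still referenced if the chain head changed in between (or if the object ids match), since a snapshot created mid-check could have captured the deleted directory. A tiny sketch of that null-safe id comparison, with a stripped-down Snap type standing in for SnapshotInfo:

    import java.util.Objects;
    import java.util.Optional;
    import java.util.UUID;

    // Stand-in for SnapshotInfo, carrying only the id the check compares on.
    final class Snap {
      final UUID id;

      Snap(UUID id) {
        this.id = id;
      }
    }

    final class ChainCheckSketch {
      // Returns true when the snapshot chain changed between the two lookups;
      // either endpoint may be null when no previous snapshot exists.
      static boolean chainChanged(Snap before, Snap after) {
        return !Objects.equals(
            Optional.ofNullable(before).map(s -> s.id),
            Optional.ofNullable(after).map(s -> s.id));
      }
    }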
+ SnapshotInfo newPreviousSnapshotInfo = SnapshotUtils.getLatestSnapshotInfo(deletedDirInfo.getVolumeName(), + deletedDirInfo.getBucketName(), getOzoneManager(), metadataManager.getSnapshotChainManager()); + return (!Objects.equals(Optional.ofNullable(newPreviousSnapshotInfo).map(SnapshotInfo::getSnapshotId), + Optional.ofNullable(previousSnapshotInfo).map(SnapshotInfo::getSnapshotId))) || (prevDirInfo != null && + prevDirInfo.getObjectID() == deletedDirInfo.getObjectID()); } } @@ -259,4 +363,9 @@ private boolean previousSnapshotHasDir( } } + public KeyValue getPendingDeletedDirInfo() + throws IOException { + return deletedDirSupplier.get(); + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index c4285482872..9a4f74eba59 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -23,7 +23,9 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -42,9 +44,9 @@ import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; @@ -67,8 +69,6 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,11 +95,15 @@ public class KeyDeletingService extends AbstractKeyDeletingService { private final Map exclusiveReplicatedSizeMap; private final Set completedExclusiveSizeSet; private final Map snapshotSeekMap; + private AtomicBoolean isRunningOnAOS; + private final boolean deepCleanSnapshots; + private final SnapshotChainManager snapshotChainManager; public KeyDeletingService(OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient, KeyManager manager, long serviceInterval, - long serviceTimeout, ConfigurationSource conf) { + long serviceTimeout, ConfigurationSource conf, + boolean deepCleanSnapshots) { super(KeyDeletingService.class.getSimpleName(), serviceInterval, TimeUnit.MILLISECONDS, KEY_DELETING_CORE_POOL_SIZE, serviceTimeout, ozoneManager, scmClient); @@ -114,6 +118,9 @@ public KeyDeletingService(OzoneManager ozoneManager, this.exclusiveReplicatedSizeMap = new HashMap<>(); this.completedExclusiveSizeSet = new HashSet<>(); this.snapshotSeekMap = new HashMap<>(); + this.isRunningOnAOS = new AtomicBoolean(false); + 
this.deepCleanSnapshots = deepCleanSnapshots; + this.snapshotChainManager = ((OmMetadataManagerImpl)manager.getMetadataManager()).getSnapshotChainManager(); } /** @@ -126,10 +133,14 @@ public AtomicLong getDeletedKeyCount() { return deletedKeyCount; } + public boolean isRunningOnAOS() { + return isRunningOnAOS.get(); + } + @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new KeyDeletingTask()); + queue.add(new KeyDeletingTask(this)); return queue; } @@ -172,7 +183,12 @@ public void setKeyLimitPerTask(int keyLimitPerTask) { * the blocks info in its deletedBlockLog), it removes these keys from the * DB. */ - private class KeyDeletingTask implements BackgroundTask { + private final class KeyDeletingTask implements BackgroundTask { + private final KeyDeletingService deletingService; + + private KeyDeletingTask(KeyDeletingService service) { + this.deletingService = service; + } @Override public int getPriority() { @@ -186,7 +202,7 @@ public BackgroundTaskResult call() { if (shouldRun()) { final long run = getRunCount().incrementAndGet(); LOG.debug("Running KeyDeletingService {}", run); - + isRunningOnAOS.set(true); int delCount = 0; try { // TODO: [SNAPSHOT] HDDS-7968. Reclaim eligible key blocks in @@ -194,7 +210,9 @@ public BackgroundTaskResult call() { // doesn't have enough entries left. // OM would have to keep track of which snapshot the key is coming // from if the above would be done inside getPendingDeletionKeys(). - + // This is to avoid race condition b/w purge request and snapshot chain update. For AOS taking the global + // snapshotId since AOS could process multiple buckets in one iteration. + UUID expectedPreviousSnapshotId = snapshotChainManager.getLatestGlobalSnapshotId(); PendingKeysDeletion pendingKeysDeletion = manager .getPendingDeletionKeys(getKeyLimitPerTask()); List keyBlocksList = pendingKeysDeletion @@ -202,7 +220,7 @@ public BackgroundTaskResult call() { if (keyBlocksList != null && !keyBlocksList.isEmpty()) { delCount = processKeyDeletes(keyBlocksList, getOzoneManager().getKeyManager(), - pendingKeysDeletion.getKeysToModify(), null); + pendingKeysDeletion.getKeysToModify(), null, expectedPreviousSnapshotId); deletedKeyCount.addAndGet(delCount); } } catch (IOException e) { @@ -211,7 +229,7 @@ public BackgroundTaskResult call() { } try { - if (delCount < keyLimitPerTask) { + if (deepCleanSnapshots && delCount < keyLimitPerTask) { processSnapshotDeepClean(delCount); } } catch (Exception e) { @@ -220,6 +238,11 @@ public BackgroundTaskResult call() { } } + isRunningOnAOS.set(false); + synchronized (deletingService) { + this.deletingService.notify(); + } + // By design, no one cares about the results of this call back. return EmptyTaskResult.newResult(); } @@ -242,15 +265,23 @@ private void processSnapshotDeepClean(int delCount) while (delCount < keyLimitPerTask && iterator.hasNext()) { List keysToPurge = new ArrayList<>(); HashMap keysToModify = new HashMap<>(); - SnapshotInfo currSnapInfo = iterator.next().getValue(); - + SnapshotInfo currSnapInfo = snapshotInfoTable.get(iterator.next().getKey()); // Deep clean only on active snapshot. Deleted Snapshots will be // cleaned up by SnapshotDeletingService. 
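KeyDeletingService takes the same precaution as the directory service: it records the latest global snapshot id before gathering pending deletions and sends it as the expected previous snapshot, so the purge can be rejected if a snapshot was created concurrently. A rough, self-contained sketch of that optimistic guard, with an AtomicReference standing in for the snapshot chain manager:

    import java.util.Objects;
    import java.util.UUID;
    import java.util.concurrent.atomic.AtomicReference;

    // Optimistic guard: the background task records the latest global snapshot id before
    // it collects keys, and the purge is applied only if the chain head is unchanged.
    final class PurgeGuardSketch {
      private final AtomicReference<UUID> latestGlobalSnapshotId = new AtomicReference<>();

      void snapshotCreated(UUID id) {
        latestGlobalSnapshotId.set(id);
      }

      boolean applyPurge(UUID expectedPreviousSnapshotId, Runnable purge) {
        // Reject the purge when a snapshot was taken after the keys were collected;
        // those keys may now belong to the new snapshot and must be re-evaluated.
        if (!Objects.equals(latestGlobalSnapshotId.get(), expectedPreviousSnapshotId)) {
          return false;
        }
        purge.run();
        return true;
      }
    }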
- if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || + if (currSnapInfo == null || currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || currSnapInfo.getDeepClean()) { continue; } + SnapshotInfo prevSnapInfo = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, + currSnapInfo); + if (prevSnapInfo != null && + (prevSnapInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), + prevSnapInfo))) { + continue; + } + try (ReferenceCounted rcCurrOmSnapshot = omSnapshotManager.getSnapshot( currSnapInfo.getVolumeName(), @@ -279,13 +310,13 @@ private void processSnapshotDeepClean(int delCount) } String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( - currSnapInfo, snapChainManager, omSnapshotManager); + SnapshotInfo previousSnapshot = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, + currSnapInfo); SnapshotInfo previousToPrevSnapshot = null; if (previousSnapshot != null) { - previousToPrevSnapshot = getPreviousActiveSnapshot( - previousSnapshot, snapChainManager, omSnapshotManager); + previousToPrevSnapshot = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, + previousSnapshot); } Table previousKeyTable = null; @@ -414,7 +445,8 @@ private void processSnapshotDeepClean(int delCount) if (!keysToPurge.isEmpty()) { processKeyDeletes(keysToPurge, currOmSnapshot.getKeyManager(), - keysToModify, currSnapInfo.getTableKey()); + keysToModify, currSnapInfo.getTableKey(), + Optional.ofNullable(previousSnapshot).map(SnapshotInfo::getSnapshotId).orElse(null)); } } finally { IOUtils.closeQuietly(rcPrevOmSnapshot, rcPrevToPrevOmSnapshot); @@ -483,24 +515,7 @@ private void updateDeepCleanedSnapshots(List deepCleanedSnapshots) { public void submitRequest(OMRequest omRequest, ClientId clientId) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = getOzoneManager().getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(getRunCount().get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - getOzoneManager().getOmServerProtocol() - .submitRequest(null, omRequest); - } + OzoneManagerRatisUtils.submitRequest(getOzoneManager(), omRequest, clientId, getRunCount().get()); } catch (ServiceException e) { LOG.error("Snapshot deep cleaning request failed. 
" + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java index 1199a0c6506..f1084155e98 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/MultipartUploadCleanupService.java @@ -29,16 +29,13 @@ import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -208,24 +205,7 @@ private OMRequest createRequest(List private void submitRequest(OMRequest omRequest) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(runCount.get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - ozoneManager.getOmServerProtocol().submitRequest(null, - omRequest); - } + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); } catch (ServiceException e) { LOG.error("Expired multipart info delete request failed. 
" + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java index 45112037c1b..fc6fe2b0c45 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java @@ -47,7 +47,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo; import org.apache.hadoop.ozone.om.helpers.OmDBTenantState; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLock; @@ -55,12 +54,11 @@ import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController; import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController.Policy; import org.apache.hadoop.ozone.om.multitenant.MultiTenantAccessController.Role; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetRangerServiceVersionRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -375,19 +373,6 @@ long getRangerOzoneServicePolicyVersion() throws IOException { return policyVersion; } - private RaftClientRequest newRaftClientRequest(OMRequest omRequest) { - return RaftClientRequest.newBuilder() - .setClientId(CLIENT_ID) - .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId()) - .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId()) - .setCallId(runCount.get()) - .setMessage( - Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - } - public void setOMDBRangerServiceVersion(long version) throws ServiceException { // OM DB update goes through Ratis @@ -402,12 +387,10 @@ public void setOMDBRangerServiceVersion(long version) .build(); try { - RaftClientRequest raftClientRequest = newRaftClientRequest(omRequest); - ozoneManager.getOmRatisServer().submitRequest(omRequest, - raftClientRequest); + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, CLIENT_ID, runCount.get()); } catch (ServiceException e) { LOG.error("SetRangerServiceVersion request failed. 
" - + "Will retry at next run."); + + "Will retry at next run.", e); throw e; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java index ab556230194..d185d872908 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java @@ -25,14 +25,15 @@ import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; +import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.ExpiredOpenKeys; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteOpenKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -41,8 +42,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,8 +52,11 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; + /** * This is the background service to delete hanging open keys. * Scan the metadata of om periodically to get @@ -80,7 +82,7 @@ public class OpenKeyCleanupService extends BackgroundService { private final Duration leaseThreshold; private final int cleanupLimitPerTask; private final AtomicLong submittedOpenKeyCount; - private final AtomicLong runCount; + private final AtomicLong callId; private final AtomicBoolean suspended; public OpenKeyCleanupService(long interval, TimeUnit unit, long timeout, @@ -115,20 +117,10 @@ public OpenKeyCleanupService(long interval, TimeUnit unit, long timeout, OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_LIMIT_PER_TASK_DEFAULT); this.submittedOpenKeyCount = new AtomicLong(0); - this.runCount = new AtomicLong(0); + this.callId = new AtomicLong(0); this.suspended = new AtomicBoolean(false); } - /** - * Returns the number of times this Background service has run. - * - * @return Long, run count. - */ - @VisibleForTesting - public long getRunCount() { - return runCount.get(); - } - /** * Suspend the service (for testing). 
*/ @@ -192,7 +184,7 @@ public BackgroundTaskResult call() throws Exception { if (!shouldRun()) { return BackgroundTaskResult.EmptyTaskResult.newResult(); } - runCount.incrementAndGet(); + LOG.debug("Running OpenKeyCleanupService"); long startTime = Time.monotonicNow(); final ExpiredOpenKeys expiredOpenKeys; try { @@ -215,6 +207,16 @@ public BackgroundTaskResult call() throws Exception { final OMResponse response = submitRequest(omRequest); if (response != null && response.getSuccess()) { ozoneManager.getMetrics().incNumOpenKeysCleaned(numOpenKeys); + if (LOG.isDebugEnabled()) { + StringBuilder sb = new StringBuilder(); + for (OpenKeyBucket.Builder openKey : openKeyBuckets) { + sb.append(openKey.getVolumeName() + OZONE_URI_DELIMITER + openKey.getBucketName() + ": ") + .append(openKey.getKeysList().stream().map(OzoneManagerProtocolProtos.OpenKey::getName) + .collect(Collectors.toList())) + .append("\n"); + } + LOG.debug("Non-hsync'ed openKeys being deleted in current iteration: \n" + sb); + } } } @@ -227,15 +229,23 @@ public BackgroundTaskResult call() throws Exception { final OMResponse response = submitRequest(createCommitKeyRequest(b)); if (response != null && response.getSuccess()) { ozoneManager.getMetrics().incNumOpenKeysHSyncCleaned(); + if (LOG.isDebugEnabled()) { + StringBuilder sb = new StringBuilder(); + for (CommitKeyRequest.Builder openKey : hsyncKeys) { + sb.append(openKey.getKeyArgs().getVolumeName() + OZONE_URI_DELIMITER + + openKey.getKeyArgs().getBucketName() + ": ") + .append(openKey.getKeyArgs().getKeyName()) + .append(", "); + } + LOG.debug("hsync'ed openKeys committed in current iteration: \n" + sb); + } } }); } - if (LOG.isDebugEnabled()) { - LOG.debug("Number of expired open keys submitted for deletion: {}," - + " for commit: {}, elapsed time: {}ms", - numOpenKeys, numHsyncKeys, Time.monotonicNow() - startTime); - } + LOG.info("Number of expired open keys submitted for deletion: {}," + + " for commit: {}, cleanupLimit: {}, elapsed time: {}ms", + numOpenKeys, numHsyncKeys, cleanupLimitPerTask, Time.monotonicNow() - startTime); final int numKeys = numOpenKeys + numHsyncKeys; submittedOpenKeyCount.addAndGet(numKeys); return () -> numKeys; @@ -247,6 +257,7 @@ private OMRequest createCommitKeyRequest( .setCmdType(Type.CommitKey) .setCommitKeyRequest(request) .setClientId(clientId.toString()) + .setVersion(ClientVersion.CURRENT_VERSION) .build(); } @@ -268,24 +279,7 @@ private OMRequest createDeleteOpenKeysRequest( private OMResponse submitRequest(OMRequest omRequest) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(runCount.get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - return server.submitRequest(omRequest, raftClientRequest); - } else { - return ozoneManager.getOmServerProtocol().submitRequest( - null, omRequest); - } + return OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, callId.incrementAndGet()); } catch (ServiceException e) { LOG.error("Open key " + omRequest.getCmdType() + " request failed. 
Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java index b3e64c98c5d..1a29ee8d96b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java @@ -26,6 +26,7 @@ import java.io.UncheckedIOException; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -48,15 +49,13 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.codehaus.jackson.map.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,27 +73,31 @@ public class QuotaRepairTask { private static final int TASK_THREAD_CNT = 3; private static final AtomicBoolean IN_PROGRESS = new AtomicBoolean(false); private static final RepairStatus REPAIR_STATUS = new RepairStatus(); + private static final AtomicLong RUN_CNT = new AtomicLong(0); private final OzoneManager om; - private final AtomicLong runCount = new AtomicLong(0); private ExecutorService executor; public QuotaRepairTask(OzoneManager ozoneManager) { this.om = ozoneManager; } - public CompletableFuture repair() throws Exception { + public CompletableFuture repair() throws IOException { + return repair(Collections.emptyList()); + } + + public CompletableFuture repair(List buckets) throws IOException { // lock in progress operation and reject any other if (!IN_PROGRESS.compareAndSet(false, true)) { LOG.info("quota repair task already running"); - return CompletableFuture.supplyAsync(() -> false); + throw new OMException("Quota repair is already running", OMException.ResultCodes.QUOTA_ERROR); } - REPAIR_STATUS.reset(runCount.get() + 1); - return CompletableFuture.supplyAsync(() -> repairTask()); + REPAIR_STATUS.reset(RUN_CNT.get() + 1); + return CompletableFuture.supplyAsync(() -> repairTask(buckets)); } public static String getStatus() { return REPAIR_STATUS.toString(); } - private boolean repairTask() { + private boolean repairTask(List buckets) { LOG.info("Starting quota repair task {}", REPAIR_STATUS); OMMetadataManager activeMetaManager = null; try { @@ -104,7 +107,7 @@ private boolean repairTask() { = OzoneManagerProtocolProtos.QuotaRepairRequest.newBuilder(); // repair active db activeMetaManager = createActiveDBCheckpoint(om.getMetadataManager(), om.getConfiguration()); - repairActiveDb(activeMetaManager, builder); + repairActiveDb(activeMetaManager, builder, buckets); // TODO: repair snapshots for quota @@ -116,12 +119,12 @@ private boolean repairTask() { 
.setClientId(clientId.toString()) .build(); OzoneManagerProtocolProtos.OMResponse response = submitRequest(omRequest, clientId); - if (response != null && !response.getSuccess()) { + if (response != null && response.getSuccess()) { + REPAIR_STATUS.updateStatus(builder, om.getMetadataManager()); + } else { LOG.error("update quota repair count response failed"); REPAIR_STATUS.updateStatus("Response for update DB is failed"); return false; - } else { - REPAIR_STATUS.updateStatus(builder, om.getMetadataManager()); } } catch (Exception exp) { LOG.error("quota repair count failed", exp); @@ -145,11 +148,15 @@ private boolean repairTask() { private void repairActiveDb( OMMetadataManager metadataManager, - OzoneManagerProtocolProtos.QuotaRepairRequest.Builder builder) throws Exception { + OzoneManagerProtocolProtos.QuotaRepairRequest.Builder builder, + List buckets) throws Exception { Map nameBucketInfoMap = new HashMap<>(); Map idBucketInfoMap = new HashMap<>(); Map oriBucketInfoMap = new HashMap<>(); - prepareAllBucketInfo(nameBucketInfoMap, idBucketInfoMap, oriBucketInfoMap, metadataManager); + prepareAllBucketInfo(nameBucketInfoMap, idBucketInfoMap, oriBucketInfoMap, metadataManager, buckets); + if (nameBucketInfoMap.isEmpty()) { + throw new OMException("no matching buckets", OMException.ResultCodes.BUCKET_NOT_FOUND); + } repairCount(nameBucketInfoMap, idBucketInfoMap, metadataManager); @@ -174,31 +181,21 @@ private void repairActiveDb( } // update volume to support quota - builder.setSupportVolumeOldQuota(true); + if (buckets.isEmpty()) { + builder.setSupportVolumeOldQuota(true); + } else { + builder.setSupportVolumeOldQuota(false); + } } private OzoneManagerProtocolProtos.OMResponse submitRequest( - OzoneManagerProtocolProtos.OMRequest omRequest, ClientId clientId) { + OzoneManagerProtocolProtos.OMRequest omRequest, ClientId clientId) throws Exception { try { - if (om.isRatisEnabled()) { - OzoneManagerRatisServer server = om.getOmRatisServer(); - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(om.getOmRatisServer().getRaftPeerId()) - .setGroupId(om.getOmRatisServer().getRaftGroupId()) - .setCallId(runCount.getAndIncrement()) - .setMessage(Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - return server.submitRequest(omRequest, raftClientRequest); - } else { - return om.getOmServerProtocol().submitRequest( - null, omRequest); - } + return OzoneManagerRatisUtils.submitRequest(om, omRequest, clientId, RUN_CNT.getAndIncrement()); } catch (ServiceException e) { LOG.error("repair quota count " + omRequest.getCmdType() + " request failed.", e); + throw e; } - return null; } private OMMetadataManager createActiveDBCheckpoint( @@ -228,24 +225,42 @@ private static String cleanTempCheckPointPath(OMMetadataManager omMetaManager) t private void prepareAllBucketInfo( Map nameBucketInfoMap, Map idBucketInfoMap, - Map oriBucketInfoMap, OMMetadataManager metadataManager) throws IOException { + Map oriBucketInfoMap, OMMetadataManager metadataManager, + List buckets) throws IOException { + if (!buckets.isEmpty()) { + for (String bucketkey : buckets) { + OmBucketInfo bucketInfo = metadataManager.getBucketTable().get(bucketkey); + if (null == bucketInfo) { + continue; + } + populateBucket(nameBucketInfoMap, idBucketInfoMap, oriBucketInfoMap, metadataManager, bucketInfo); + } + return; + } try (TableIterator> iterator = metadataManager.getBucketTable().iterator()) { while 
(iterator.hasNext()) { Table.KeyValue entry = iterator.next(); OmBucketInfo bucketInfo = entry.getValue(); - String bucketNameKey = buildNamePath(bucketInfo.getVolumeName(), - bucketInfo.getBucketName()); - oriBucketInfoMap.put(bucketNameKey, bucketInfo.copyObject()); - bucketInfo.incrUsedNamespace(-bucketInfo.getUsedNamespace()); - bucketInfo.incrUsedBytes(-bucketInfo.getUsedBytes()); - nameBucketInfoMap.put(bucketNameKey, bucketInfo); - idBucketInfoMap.put(buildIdPath(metadataManager.getVolumeId(bucketInfo.getVolumeName()), - bucketInfo.getObjectID()), bucketInfo); + populateBucket(nameBucketInfoMap, idBucketInfoMap, oriBucketInfoMap, metadataManager, bucketInfo); } } } + private static void populateBucket( + Map nameBucketInfoMap, Map idBucketInfoMap, + Map oriBucketInfoMap, OMMetadataManager metadataManager, + OmBucketInfo bucketInfo) throws IOException { + String bucketNameKey = buildNamePath(bucketInfo.getVolumeName(), + bucketInfo.getBucketName()); + oriBucketInfoMap.put(bucketNameKey, bucketInfo.copyObject()); + bucketInfo.incrUsedNamespace(-bucketInfo.getUsedNamespace()); + bucketInfo.incrUsedBytes(-bucketInfo.getUsedBytes()); + nameBucketInfoMap.put(bucketNameKey, bucketInfo); + idBucketInfoMap.put(buildIdPath(metadataManager.getVolumeId(bucketInfo.getVolumeName()), + bucketInfo.getObjectID()), bucketInfo); + } + private boolean isChange(OmBucketInfo lBucketInfo, OmBucketInfo rBucketInfo) { if (lBucketInfo.getUsedNamespace() != rBucketInfo.getUsedNamespace() || lBucketInfo.getUsedBytes() != rBucketInfo.getUsedBytes()) { @@ -468,8 +483,9 @@ public String toString() { } Map status = new HashMap<>(); status.put("taskId", taskId); - status.put("lastRunStartTime", lastRunStartTime); - status.put("lastRunFinishedTime", lastRunFinishedTime); + status.put("lastRunStartTime", lastRunStartTime > 0 ? new java.util.Date(lastRunStartTime).toString() : ""); + status.put("lastRunFinishedTime", lastRunFinishedTime > 0 ? 
new java.util.Date(lastRunFinishedTime).toString() + : ""); status.put("errorMsg", errorMsg); status.put("bucketCountDiffMap", bucketCountDiffMap); try { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index 99e3903447d..edc6c7a1629 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -20,57 +20,49 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.ClientVersion; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveDeletedKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; +import 
java.util.Iterator; import java.util.List; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.hdds.HddsUtils.toProtobuf; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; @@ -96,16 +88,17 @@ public class SnapshotDeletingService extends AbstractKeyDeletingService { private final AtomicBoolean suspended; private final OzoneConfiguration conf; private final AtomicLong successRunCount; - private final long snapshotDeletionPerTask; - private final int keyLimitPerSnapshot; + private final int keyLimitPerTask; + private final int snapshotDeletionPerTask; private final int ratisByteLimit; + private final long serviceTimeout; public SnapshotDeletingService(long interval, long serviceTimeout, - OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient) + OzoneManager ozoneManager) throws IOException { super(SnapshotDeletingService.class.getSimpleName(), interval, TimeUnit.MILLISECONDS, SNAPSHOT_DELETING_CORE_POOL_SIZE, - serviceTimeout, ozoneManager, scmClient); + serviceTimeout, ozoneManager, null); this.ozoneManager = ozoneManager; this.omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) @@ -114,8 +107,7 @@ public SnapshotDeletingService(long interval, long serviceTimeout, this.successRunCount = new AtomicLong(0); this.suspended = new AtomicBoolean(false); this.conf = ozoneManager.getConfiguration(); - this.snapshotDeletionPerTask = conf - .getLong(SNAPSHOT_DELETING_LIMIT_PER_TASK, + this.snapshotDeletionPerTask = conf.getInt(SNAPSHOT_DELETING_LIMIT_PER_TASK, SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT); int limit = (int) conf.getStorageSize( OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, @@ -123,9 +115,35 @@ public SnapshotDeletingService(long interval, long serviceTimeout, StorageUnit.BYTES); // always go to 90% of max limit for request as other header will be added this.ratisByteLimit = (int) (limit * 0.9); - this.keyLimitPerSnapshot = conf.getInt( + this.keyLimitPerTask = conf.getInt( OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK, OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT); + this.serviceTimeout = serviceTimeout; + } + + // Wait for a notification from KeyDeletingService if the key deletion is running. This is to ensure that merging of + entries does not start while the AOS is still processing the deleted keys. + @VisibleForTesting + public void waitForKeyDeletingService() throws InterruptedException { + KeyDeletingService keyDeletingService = getOzoneManager().getKeyManager().getDeletingService(); + synchronized (keyDeletingService) { + while (keyDeletingService.isRunningOnAOS()) { + keyDeletingService.wait(serviceTimeout); + } + } + } + + // Wait for a notification from DirectoryDeletingService if the directory deletion is running. This is to ensure that + // merging of entries does not start while the AOS is still processing the deleted keys. 
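The wait helpers in this hunk (waitForKeyDeletingService above, waitForDirDeletingService just below) use the standard Java monitor handshake: the caller synchronizes on the deleting service and blocks in wait(serviceTimeout) while isRunningOnAOS() is true, so the deleting services are presumably expected to clear that flag and call notifyAll() on themselves once an active object store (AOS) pass finishes; the timeout bounds the wait even if no notification ever arrives. A minimal sketch of that notifying side, assuming only the isRunningOnAOS() flag visible in this patch (the class and method names below are illustrative, not the actual KeyDeletingService or DirectoryDeletingService code):

  // Illustrative counterpart to the wait helpers; not actual Ozone code.
  public final class ExampleAosDeletingService {
    private volatile boolean runningOnAOS;

    public boolean isRunningOnAOS() {
      return runningOnAOS;
    }

    public void runOnePassOnAOS() {
      runningOnAOS = true;
      try {
        // ... process deleted keys/directories on the active object store ...
      } finally {
        synchronized (this) {
          runningOnAOS = false;
          // Wake any SnapshotDeletingService thread parked in wait(serviceTimeout) on this monitor.
          notifyAll();
        }
      }
    }
  }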
+ @VisibleForTesting + public void waitForDirDeletingService() throws InterruptedException { + DirectoryDeletingService directoryDeletingService = getOzoneManager().getKeyManager() + .getDirDeletingService(); + synchronized (directoryDeletingService) { + while (directoryDeletingService.isRunningOnAOS()) { + directoryDeletingService.wait(serviceTimeout); + } + } } private class SnapshotDeletingTask implements BackgroundTask { @@ -139,317 +157,89 @@ public BackgroundTaskResult call() throws InterruptedException { getRunCount().incrementAndGet(); - ReferenceCounted rcOmSnapshot = null; - ReferenceCounted rcOmPreviousSnapshot = null; - - Table snapshotInfoTable = - ozoneManager.getMetadataManager().getSnapshotInfoTable(); - List purgeSnapshotKeys = new ArrayList<>(); - try (TableIterator> iterator = snapshotInfoTable.iterator()) { - + try { + int remaining = keyLimitPerTask; + Iterator iterator = chainManager.iterator(true); + List snapshotsToBePurged = new ArrayList<>(); long snapshotLimit = snapshotDeletionPerTask; - - while (iterator.hasNext() && snapshotLimit > 0) { - SnapshotInfo snapInfo = iterator.next().getValue(); - - // Only Iterate in deleted snapshot + while (iterator.hasNext() && snapshotLimit > 0 && remaining > 0) { + SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, chainManager, iterator.next()); if (shouldIgnoreSnapshot(snapInfo)) { continue; } - - // Note: Can refactor this to use try-with-resources. - // Handling RC decrements manually for now to minimize conflicts. - rcOmSnapshot = omSnapshotManager.getSnapshot( - snapInfo.getVolumeName(), - snapInfo.getBucketName(), - snapInfo.getName()); - OmSnapshot omSnapshot = rcOmSnapshot.get(); - - Table snapshotDeletedTable = - omSnapshot.getMetadataManager().getDeletedTable(); - Table snapshotDeletedDirTable = - omSnapshot.getMetadataManager().getDeletedDirTable(); - - Table renamedTable = - omSnapshot.getMetadataManager().getSnapshotRenamedTable(); - - long volumeId = ozoneManager.getMetadataManager() - .getVolumeId(snapInfo.getVolumeName()); - // Get bucketInfo for the snapshot bucket to get bucket layout. - String dbBucketKey = ozoneManager.getMetadataManager().getBucketKey( - snapInfo.getVolumeName(), snapInfo.getBucketName()); - OmBucketInfo bucketInfo = ozoneManager.getMetadataManager() - .getBucketTable().get(dbBucketKey); - - if (bucketInfo == null) { - // Decrement ref count - rcOmSnapshot.close(); - rcOmSnapshot = null; - throw new IllegalStateException("Bucket " + "/" + - snapInfo.getVolumeName() + "/" + snapInfo.getBucketName() + - " is not found. BucketInfo should not be null for snapshotted" + - " bucket. The OM is in unexpected state."); - } - - String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; - String dbBucketKeyForDir = ozoneManager.getMetadataManager() - .getBucketKey(Long.toString(volumeId), - Long.toString(bucketInfo.getObjectID())) + OM_KEY_PREFIX; - - if (isSnapshotReclaimable(snapshotDeletedTable, - snapshotDeletedDirTable, snapshotBucketKey, dbBucketKeyForDir)) { - purgeSnapshotKeys.add(snapInfo.getTableKey()); - // Decrement ref count - rcOmSnapshot.close(); - rcOmSnapshot = null; + LOG.info("Started Snapshot Deletion Processing for snapshot : {}", snapInfo.getTableKey()); + SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, chainManager, snapInfo); + // Continue if the next snapshot is not active. This is to avoid unnecessary copies from one snapshot to + // another. 
+ if (nextSnapshot != null && + nextSnapshot.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { continue; } - //TODO: [SNAPSHOT] Add lock to deletedTable and Active DB. - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( - snapInfo, chainManager, omSnapshotManager); - Table previousKeyTable = null; - Table previousDirTable = null; - OmSnapshot omPreviousSnapshot = null; - - // Split RepeatedOmKeyInfo and update current snapshot deletedKeyTable - // and next snapshot deletedKeyTable. - if (previousSnapshot != null) { - rcOmPreviousSnapshot = omSnapshotManager.getSnapshot( - previousSnapshot.getVolumeName(), - previousSnapshot.getBucketName(), - previousSnapshot.getName()); - omPreviousSnapshot = rcOmPreviousSnapshot.get(); - - previousKeyTable = omPreviousSnapshot - .getMetadataManager().getKeyTable(bucketInfo.getBucketLayout()); - previousDirTable = omPreviousSnapshot - .getMetadataManager().getDirectoryTable(); + // nextSnapshot = null means entries would be moved to AOS. + if (nextSnapshot == null) { + waitForKeyDeletingService(); + waitForDirDeletingService(); } - - // Move key to either next non deleted snapshot's deletedTable - // or keep it in current snapshot deleted table. - List toReclaimList = new ArrayList<>(); - List toNextDBList = new ArrayList<>(); - // A list of renamed keys/files/dirs - List renamedList = new ArrayList<>(); - List dirsToMove = new ArrayList<>(); - - long remainNum = handleDirectoryCleanUp(snapshotDeletedDirTable, - previousDirTable, renamedTable, dbBucketKeyForDir, snapInfo, - omSnapshot, dirsToMove, renamedList); - int deletionCount = 0; - - try (TableIterator> deletedIterator = snapshotDeletedTable - .iterator()) { - - List keysToPurge = new ArrayList<>(); - deletedIterator.seek(snapshotBucketKey); - - while (deletedIterator.hasNext() && - deletionCount < remainNum) { - Table.KeyValue - deletedKeyValue = deletedIterator.next(); - String deletedKey = deletedKeyValue.getKey(); - - // Exit if it is out of the bucket scope. - if (!deletedKey.startsWith(snapshotBucketKey)) { - // If snapshot deletedKeyTable doesn't have any - // entry in the snapshot scope it can be reclaimed - break; - } - - RepeatedOmKeyInfo repeatedOmKeyInfo = deletedKeyValue.getValue(); - - SnapshotMoveKeyInfos.Builder toReclaim = SnapshotMoveKeyInfos - .newBuilder() - .setKey(deletedKey); - SnapshotMoveKeyInfos.Builder toNextDb = SnapshotMoveKeyInfos - .newBuilder() - .setKey(deletedKey); - HddsProtos.KeyValue.Builder renamedKey = HddsProtos.KeyValue - .newBuilder(); - - for (OmKeyInfo keyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) { - splitRepeatedOmKeyInfo(toReclaim, toNextDb, renamedKey, - keyInfo, previousKeyTable, renamedTable, - bucketInfo, volumeId); + try (ReferenceCounted snapshot = omSnapshotManager.getSnapshot( + snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName())) { + KeyManager snapshotKeyManager = snapshot.get().getKeyManager(); + int moveCount = 0; + // Get all entries from deletedKeyTable. + List>> deletedKeyEntries = + snapshotKeyManager.getDeletedKeyEntries(snapInfo.getVolumeName(), snapInfo.getBucketName(), + null, remaining); + moveCount += deletedKeyEntries.size(); + // Get all entries from deletedDirTable. + List> deletedDirEntries = snapshotKeyManager.getDeletedDirEntries( + snapInfo.getVolumeName(), snapInfo.getBucketName(), remaining - moveCount); + moveCount += deletedDirEntries.size(); + // Get all entries from snapshotRenamedTable. 
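  // Note: the three table reads in this block share one 'remaining' budget, initialized from
  // keyLimitPerTask (OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK) and carried over across snapshots in
  // the same run, so a single task never collects more than that many entries in total. The separate
  // Ratis byte-size cap (ratisByteLimit) is applied later, when the move request is built in
  // submitSnapshotMoveDeletedKeys().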
+ List> renameEntries = snapshotKeyManager.getRenamesKeyEntries( + snapInfo.getVolumeName(), snapInfo.getBucketName(), null, remaining - moveCount); + moveCount += renameEntries.size(); + if (moveCount > 0) { + List deletedKeys = new ArrayList<>(deletedKeyEntries.size()); + List deletedDirs = new ArrayList<>(deletedDirEntries.size()); + List renameKeys = new ArrayList<>(renameEntries.size()); + + // Convert deletedKeyEntries to SnapshotMoveKeyInfos. + for (Table.KeyValue> deletedEntry : deletedKeyEntries) { + deletedKeys.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedEntry.getKey()) + .addAllKeyInfos(deletedEntry.getValue() + .stream().map(val -> val.getProtobuf(ClientVersion.CURRENT_VERSION)) + .collect(Collectors.toList())).build()); } - // If all the KeyInfos are reclaimable in RepeatedOmKeyInfo - // then no need to update current snapshot deletedKeyTable. - if (!(toReclaim.getKeyInfosCount() == - repeatedOmKeyInfo.getOmKeyInfoList().size())) { - toReclaimList.add(toReclaim.build()); - toNextDBList.add(toNextDb.build()); - } else { - // The key can be reclaimed here. - List blocksForKeyDelete = omSnapshot - .getMetadataManager() - .getBlocksForKeyDelete(deletedKey); - if (blocksForKeyDelete != null) { - keysToPurge.addAll(blocksForKeyDelete); - } + // Convert deletedDirEntries to SnapshotMoveKeyInfos. + for (Table.KeyValue deletedDirEntry : deletedDirEntries) { + deletedDirs.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedDirEntry.getKey()) + .addKeyInfos(deletedDirEntry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build()); } - if (renamedKey.hasKey() && renamedKey.hasValue()) { - renamedList.add(renamedKey.build()); + // Convert renamedEntries to KeyValue. + for (Table.KeyValue renameEntry : renameEntries) { + renameKeys.add(HddsProtos.KeyValue.newBuilder().setKey(renameEntry.getKey()) + .setValue(renameEntry.getValue()).build()); } - deletionCount++; + submitSnapshotMoveDeletedKeys(snapInfo, deletedKeys, renameKeys, deletedDirs); + remaining -= moveCount; + } else { + snapshotsToBePurged.add(snapInfo.getTableKey()); } - - // Delete keys From deletedTable - processKeyDeletes(keysToPurge, omSnapshot.getKeyManager(), - null, snapInfo.getTableKey()); - successRunCount.incrementAndGet(); - } catch (IOException ex) { - LOG.error("Error while running Snapshot Deleting Service for " + - "snapshot " + snapInfo.getTableKey() + " with snapshotId " + - snapInfo.getSnapshotId() + ". Processed " + deletionCount + - " keys and " + (keyLimitPerSnapshot - remainNum) + - " directories and files", ex); } + successRunCount.incrementAndGet(); snapshotLimit--; - // Submit Move request to OM. 
- submitSnapshotMoveDeletedKeys(snapInfo, toReclaimList, - toNextDBList, renamedList, dirsToMove); - - // Properly decrement ref count for rcOmPreviousSnapshot - if (rcOmPreviousSnapshot != null) { - rcOmPreviousSnapshot.close(); - rcOmPreviousSnapshot = null; - } + } + if (!snapshotsToBePurged.isEmpty()) { + submitSnapshotPurgeRequest(snapshotsToBePurged); } } catch (IOException e) { LOG.error("Error while running Snapshot Deleting Service", e); - } finally { - // Decrement ref counts - if (rcOmPreviousSnapshot != null) { - rcOmPreviousSnapshot.close(); - } - if (rcOmSnapshot != null) { - rcOmSnapshot.close(); - } } - submitSnapshotPurgeRequest(purgeSnapshotKeys); - return BackgroundTaskResult.EmptyTaskResult.newResult(); } - private boolean isSnapshotReclaimable( - Table snapshotDeletedTable, - Table snapshotDeletedDirTable, - String snapshotBucketKey, String dbBucketKeyForDir) throws IOException { - - boolean isDirTableCleanedUp = false; - boolean isKeyTableCleanedUp = false; - try (TableIterator> iterator = snapshotDeletedTable.iterator();) { - iterator.seek(snapshotBucketKey); - // If the next entry doesn't start with snapshotBucketKey then - // deletedKeyTable is already cleaned up. - isKeyTableCleanedUp = !iterator.hasNext() || !iterator.next().getKey() - .startsWith(snapshotBucketKey); - } - - try (TableIterator> - iterator = snapshotDeletedDirTable.iterator()) { - iterator.seek(dbBucketKeyForDir); - // If the next entry doesn't start with dbBucketKeyForDir then - // deletedDirTable is already cleaned up. - isDirTableCleanedUp = !iterator.hasNext() || !iterator.next().getKey() - .startsWith(dbBucketKeyForDir); - } - - return (isDirTableCleanedUp || snapshotDeletedDirTable.isEmpty()) && - (isKeyTableCleanedUp || snapshotDeletedTable.isEmpty()); - } - - @SuppressWarnings("checkstyle:ParameterNumber") - private long handleDirectoryCleanUp( - Table snapshotDeletedDirTable, - Table previousDirTable, - Table renamedTable, - String dbBucketKeyForDir, SnapshotInfo snapInfo, - OmSnapshot omSnapshot, List dirsToMove, - List renamedList) { - - long dirNum = 0L; - long subDirNum = 0L; - long subFileNum = 0L; - long remainNum = keyLimitPerSnapshot; - int consumedSize = 0; - List purgePathRequestList = new ArrayList<>(); - List> allSubDirList - = new ArrayList<>(keyLimitPerSnapshot); - try (TableIterator> deletedDirIterator = - snapshotDeletedDirTable.iterator()) { - - long startTime = Time.monotonicNow(); - deletedDirIterator.seek(dbBucketKeyForDir); - - while (deletedDirIterator.hasNext()) { - Table.KeyValue deletedDir = - deletedDirIterator.next(); - String deletedDirKey = deletedDir.getKey(); - - // Exit for dirs out of snapshot scope. 
- if (!deletedDirKey.startsWith(dbBucketKeyForDir)) { - break; - } - - if (isDirReclaimable(deletedDir, previousDirTable, - renamedTable, renamedList)) { - // Reclaim here - PurgePathRequest request = prepareDeleteDirRequest( - remainNum, deletedDir.getValue(), deletedDir.getKey(), - allSubDirList, omSnapshot.getKeyManager()); - if (isBufferLimitCrossed(ratisByteLimit, consumedSize, - request.getSerializedSize())) { - if (purgePathRequestList.size() != 0) { - // if message buffer reaches max limit, avoid sending further - remainNum = 0; - break; - } - // if directory itself is having a lot of keys / files, - // reduce capacity to minimum level - remainNum = MIN_ERR_LIMIT_PER_TASK; - request = prepareDeleteDirRequest( - remainNum, deletedDir.getValue(), deletedDir.getKey(), - allSubDirList, omSnapshot.getKeyManager()); - } - consumedSize += request.getSerializedSize(); - purgePathRequestList.add(request); - remainNum = remainNum - request.getDeletedSubFilesCount(); - remainNum = remainNum - request.getMarkDeletedSubDirsCount(); - // Count up the purgeDeletedDir, subDirs and subFiles - if (request.getDeletedDir() != null - && !request.getDeletedDir().isEmpty()) { - dirNum++; - } - subDirNum += request.getMarkDeletedSubDirsCount(); - subFileNum += request.getDeletedSubFilesCount(); - } else { - dirsToMove.add(deletedDir.getKey()); - } - } - - remainNum = optimizeDirDeletesAndSubmitRequest(remainNum, dirNum, - subDirNum, subFileNum, allSubDirList, purgePathRequestList, - snapInfo.getTableKey(), startTime, ratisByteLimit - consumedSize, - omSnapshot.getKeyManager()); - } catch (IOException e) { - LOG.error("Error while running delete directories and files for " + - "snapshot " + snapInfo.getTableKey() + " in snapshot deleting " + - "background task. Will retry at next run.", e); - } - - return remainNum; - } - private void submitSnapshotPurgeRequest(List purgeSnapshotKeys) { if (!purgeSnapshotKeys.isEmpty()) { SnapshotPurgeRequest snapshotPurgeRequest = SnapshotPurgeRequest @@ -467,92 +257,36 @@ private void submitSnapshotPurgeRequest(List purgeSnapshotKeys) { } } - @SuppressWarnings("checkstyle:ParameterNumber") - private void splitRepeatedOmKeyInfo(SnapshotMoveKeyInfos.Builder toReclaim, - SnapshotMoveKeyInfos.Builder toNextDb, - HddsProtos.KeyValue.Builder renamedKey, OmKeyInfo keyInfo, - Table previousKeyTable, - Table renamedTable, - OmBucketInfo bucketInfo, long volumeId) throws IOException { - - if (isKeyReclaimable(previousKeyTable, renamedTable, - keyInfo, bucketInfo, volumeId, renamedKey)) { - // Update in current db's deletedKeyTable - toReclaim.addKeyInfos(keyInfo - .getProtobuf(ClientVersion.CURRENT_VERSION)); - } else { - // Move to next non deleted snapshot's deleted table - toNextDb.addKeyInfos(keyInfo.getProtobuf( - ClientVersion.CURRENT_VERSION)); - } - } - - private boolean isDirReclaimable( - Table.KeyValue deletedDir, - Table previousDirTable, - Table renamedTable, - List renamedList) throws IOException { + private void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, + List deletedKeys, + List renamedList, + List dirsToMove) { - if (previousDirTable == null) { - return true; - } - - String deletedDirDbKey = deletedDir.getKey(); - OmKeyInfo deletedDirInfo = deletedDir.getValue(); - String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( - deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), - deletedDirInfo.getObjectID()); - - /* - snapshotRenamedTable: /volumeName/bucketName/objectID -> - /volumeId/bucketId/parentId/dirName - */ - String 
dbKeyBeforeRename = renamedTable.getIfExist(dbRenameKey); - String prevDbKey = null; - - if (dbKeyBeforeRename != null) { - prevDbKey = dbKeyBeforeRename; - HddsProtos.KeyValue renamedDir = HddsProtos.KeyValue - .newBuilder() - .setKey(dbRenameKey) - .setValue(dbKeyBeforeRename) - .build(); - renamedList.add(renamedDir); - } else { - // In OMKeyDeleteResponseWithFSO OzonePathKey is converted to - // OzoneDeletePathKey. Changing it back to check the previous DirTable. - prevDbKey = ozoneManager.getMetadataManager() - .getOzoneDeletePathDirKey(deletedDirDbKey); - } - - OmDirectoryInfo prevDirectoryInfo = previousDirTable.get(prevDbKey); - if (prevDirectoryInfo == null) { - return true; - } - - return prevDirectoryInfo.getObjectID() != deletedDirInfo.getObjectID(); - } - - public void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, - List toReclaimList, - List toNextDBList, - List renamedList, - List dirsToMove) throws InterruptedException { + SnapshotMoveTableKeysRequest.Builder moveDeletedKeysBuilder = SnapshotMoveTableKeysRequest.newBuilder() + .setFromSnapshotID(toProtobuf(snapInfo.getSnapshotId())); - SnapshotMoveDeletedKeysRequest.Builder moveDeletedKeysBuilder = - SnapshotMoveDeletedKeysRequest.newBuilder() - .setFromSnapshot(snapInfo.getProtobuf()); - - SnapshotMoveDeletedKeysRequest moveDeletedKeys = moveDeletedKeysBuilder - .addAllReclaimKeys(toReclaimList) - .addAllNextDBKeys(toNextDBList) + SnapshotMoveTableKeysRequest moveDeletedKeys = moveDeletedKeysBuilder + .addAllDeletedKeys(deletedKeys) .addAllRenamedKeys(renamedList) - .addAllDeletedDirsToMove(dirsToMove) + .addAllDeletedDirs(dirsToMove) .build(); + if (isBufferLimitCrossed(ratisByteLimit, 0, moveDeletedKeys.getSerializedSize())) { + int remaining = MIN_ERR_LIMIT_PER_TASK; + deletedKeys = deletedKeys.subList(0, Math.min(remaining, deletedKeys.size())); + remaining -= deletedKeys.size(); + renamedList = renamedList.subList(0, Math.min(remaining, renamedList.size())); + remaining -= renamedList.size(); + dirsToMove = dirsToMove.subList(0, Math.min(remaining, dirsToMove.size())); + moveDeletedKeys = moveDeletedKeysBuilder + .addAllDeletedKeys(deletedKeys) + .addAllRenamedKeys(renamedList) + .addAllDeletedDirs(dirsToMove) + .build(); + } OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SnapshotMoveDeletedKeys) - .setSnapshotMoveDeletedKeysRequest(moveDeletedKeys) + .setCmdType(Type.SnapshotMoveTableKeys) + .setSnapshotMoveTableKeysRequest(moveDeletedKeys) .setClientId(clientId.toString()) .build(); @@ -561,36 +295,26 @@ public void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, } } - public void submitRequest(OMRequest omRequest) { + private void submitRequest(OMRequest omRequest) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = ozoneManager.getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(getRunCount().get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - ozoneManager.getOmServerProtocol().submitRequest(null, omRequest); - } + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, getRunCount().get()); } catch (ServiceException e) { - LOG.error("Snapshot Deleting request failed. 
" + - "Will retry at next run.", e); + LOG.error("Request: {} fired by SnapshotDeletingService failed. Will retry in the next run", omRequest, e); } } } + /** + * Checks if a given snapshot has been deleted and all the changes made to snapshot have been flushed to disk. + * @param snapInfo SnapshotInfo corresponding to the snapshot. + * @return true if the snapshot is still active or changes to snapshot have not been flushed to disk otherwise false. + * @throws IOException + */ @VisibleForTesting - boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) { + boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) throws IOException { SnapshotInfo.SnapshotStatus snapshotStatus = snapInfo.getSnapshotStatus(); - return snapshotStatus != SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED; + return snapshotStatus != SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), snapInfo); } // TODO: Move this util class. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index fe0f6e111ed..e7133e62589 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -34,13 +34,12 @@ import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -48,8 +47,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import java.io.IOException; import java.util.ArrayList; @@ -64,6 +61,7 @@ import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getPreviousSnapshot; /** * Snapshot BG Service for deleted directory deep clean and exclusive size @@ -146,11 +144,11 @@ public BackgroundTaskResult call() { > iterator = snapshotInfoTable.iterator()) { while (iterator.hasNext()) { - SnapshotInfo currSnapInfo = iterator.next().getValue(); + SnapshotInfo currSnapInfo = snapshotInfoTable.get(iterator.next().getKey()); // Expand deleted dirs only on active snapshot. 
Deleted Snapshots // will be cleaned up by SnapshotDeletingService. - if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || + if (currSnapInfo == null || currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || currSnapInfo.getDeepCleanedDeletedDir()) { continue; } @@ -176,8 +174,7 @@ public BackgroundTaskResult call() { "unexpected state."); } - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( - currSnapInfo, snapChainManager, omSnapshotManager); + SnapshotInfo previousSnapshot = getPreviousSnapshot(getOzoneManager(), snapChainManager, currSnapInfo); SnapshotInfo previousToPrevSnapshot = null; Table previousKeyTable = null; @@ -194,8 +191,7 @@ public BackgroundTaskResult call() { .getKeyTable(bucketInfo.getBucketLayout()); prevRenamedTable = omPreviousSnapshot .getMetadataManager().getSnapshotRenamedTable(); - previousToPrevSnapshot = getPreviousActiveSnapshot( - previousSnapshot, snapChainManager, omSnapshotManager); + previousToPrevSnapshot = getPreviousSnapshot(getOzoneManager(), snapChainManager, previousSnapshot); } Table previousToPrevKeyTable = null; @@ -438,25 +434,7 @@ private void updateDeepCleanSnapshotDir(String snapshotKeyTable) { public void submitRequest(OMRequest omRequest, ClientId clientId) { try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = - getOzoneManager().getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(getRunCount().get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - getOzoneManager().getOmServerProtocol() - .submitRequest(null, omRequest); - } + OzoneManagerRatisUtils.submitRequest(getOzoneManager(), omRequest, clientId, getRunCount().get()); } catch (ServiceException e) { LOG.error("Snapshot deep cleaning request failed. " + "Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java index 05b0e5b0cdc..b400fb6ed76 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java @@ -77,7 +77,7 @@ public static Object getINode(Path file) throws IOException { * sst compaction backup directory) * * @param truncateLength - Length of initial path to trim in file path. - * @param hardLinkFiles - Map of link->file paths. + * @param hardLinkFiles - Map of link->file paths. * @return Path to the file of links created. 
*/ public static Path createHardLinkList(int truncateLength, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index db6d9b7b908..8add87f0633 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -284,7 +284,8 @@ private boolean initNativeLibraryForEfficientDiff(final OzoneConfiguration conf) try { return ManagedRawSSTFileReader.loadLibrary(); } catch (NativeLibraryNotLoadedException e) { - LOG.error("Native Library for raw sst file reading loading failed.", e); + LOG.warn("Native Library for raw sst file reading loading failed." + + " Fallback to performing a full diff instead. {}", e.getMessage()); return false; } } @@ -1031,8 +1032,10 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( // tombstone is not loaded. // TODO: [SNAPSHOT] Update Rocksdb SSTFileIterator to read tombstone if (skipNativeDiff || !isNativeLibsLoaded) { - deltaFiles.addAll(getSSTFileListForSnapshot(fromSnapshot, - tablesToLookUp)); + Set inputFiles = getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp); + ManagedRocksDB fromDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore()).getDb().getManagedRocksDb(); + RocksDiffUtils.filterRelevantSstFiles(inputFiles, tablePrefixes, fromDB); + deltaFiles.addAll(inputFiles); } addToObjectIdMap(fsTable, tsTable, deltaFiles, !skipNativeDiff && isNativeLibsLoaded, @@ -1124,7 +1127,7 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, String diffDir) throws IOException { // TODO: [SNAPSHOT] Refactor the parameter list - final Set deltaFiles = new HashSet<>(); + Optional> deltaFiles = Optional.empty(); // Check if compaction DAG is available, use that if so if (differ != null && fsInfo != null && tsInfo != null && !useFullDiff) { @@ -1138,40 +1141,36 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, LOG.debug("Calling RocksDBCheckpointDiffer"); try { - List sstDiffList = differ.getSSTDiffListWithFullPath(toDSI, - fromDSI, diffDir); - deltaFiles.addAll(sstDiffList); + deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, diffDir).map(HashSet::new); } catch (Exception exception) { LOG.warn("Failed to get SST diff file using RocksDBCheckpointDiffer. " + "It will fallback to full diff now.", exception); } } - if (useFullDiff || deltaFiles.isEmpty()) { + if (useFullDiff || !deltaFiles.isPresent()) { // If compaction DAG is not available (already cleaned up), fall back to // the slower approach. 
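  // Note: with the Optional-based result of getSSTDiffListWithFullPath(), an empty delta-file set
  // produced by the compaction DAG is accepted as a valid answer; only an absent value (differ not
  // available, or the call above threw and was logged) falls through to this full SST-file comparison.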
if (!useFullDiff) { LOG.warn("RocksDBCheckpointDiffer is not available, falling back to" + " slow path"); } - - Set fromSnapshotFiles = - RdbUtil.getSSTFilesForComparison( - ((RDBStore)fromSnapshot.getMetadataManager().getStore()) - .getDb().getManagedRocksDb(), - tablesToLookUp); - Set toSnapshotFiles = - RdbUtil.getSSTFilesForComparison( - ((RDBStore)toSnapshot.getMetadataManager().getStore()).getDb() - .getManagedRocksDb(), - tablesToLookUp); - - deltaFiles.addAll(fromSnapshotFiles); - deltaFiles.addAll(toSnapshotFiles); - RocksDiffUtils.filterRelevantSstFiles(deltaFiles, tablePrefixes); + ManagedRocksDB fromDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore()) + .getDb().getManagedRocksDb(); + ManagedRocksDB toDB = ((RDBStore)toSnapshot.getMetadataManager().getStore()) + .getDb().getManagedRocksDb(); + Set fromSnapshotFiles = getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp); + Set toSnapshotFiles = getSSTFileListForSnapshot(toSnapshot, tablesToLookUp); + Set diffFiles = new HashSet<>(); + diffFiles.addAll(fromSnapshotFiles); + diffFiles.addAll(toSnapshotFiles); + RocksDiffUtils.filterRelevantSstFiles(diffFiles, tablePrefixes, fromDB, toDB); + deltaFiles = Optional.of(diffFiles); } - return deltaFiles; + return deltaFiles.orElseThrow(() -> + new IOException("Error getting diff files b/w " + fromSnapshot.getSnapshotTableKey() + " and " + + toSnapshot.getSnapshotTableKey())); } private void validateEstimatedKeyChangesAreInLimits( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 2041fa791a7..0ac504246f6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -21,12 +21,14 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; import org.slf4j.Logger; @@ -34,18 +36,20 @@ import java.io.File; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.NoSuchElementException; import java.util.HashMap; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TIMEOUT; /** @@ -82,11 +86,18 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, } if (snapshotInfo == null) { throw new OMException("Snapshot '" + snapshotKey + "' is not found.", - KEY_NOT_FOUND); + FILE_NOT_FOUND); } return snapshotInfo; } + public static SnapshotInfo getSnapshotInfo(OzoneManager ozoneManager, + SnapshotChainManager chainManager, + UUID snapshotId) throws IOException { + String tableKey = chainManager.getTableKey(snapshotId); + return SnapshotUtils.getSnapshotInfo(ozoneManager, tableKey); + } + public static void dropColumnFamilyHandle( final ManagedRocksDB rocksDB, final ColumnFamilyHandle columnFamilyHandle) { @@ -140,37 +151,24 @@ public static void checkSnapshotActive(SnapshotInfo snapInfo, } /** - * Get the next non deleted snapshot in the snapshot chain. + * Get the next snapshot in the snapshot chain. */ - public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, - SnapshotChainManager chainManager, OmSnapshotManager omSnapshotManager) + public static SnapshotInfo getNextSnapshot(OzoneManager ozoneManager, + SnapshotChainManager chainManager, + SnapshotInfo snapInfo) throws IOException { - // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot // is removed in-memory but OMDoubleBuffer has not flushed yet. if (snapInfo == null) { - throw new OMException("Snapshot Info is null. Cannot get the next snapshot", INVALID_SNAPSHOT_ERROR); + throw new OMException("Provided Snapshot Info argument is null. Cannot get the next snapshot for a null value", + FILE_NOT_FOUND); } - try { - while (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), + if (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId())) { - - UUID nextPathSnapshot = - chainManager.nextPathSnapshot( - snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); - - String tableKey = chainManager.getTableKey(nextPathSnapshot); - SnapshotInfo nextSnapshotInfo = - omSnapshotManager.getSnapshotInfo(tableKey); - - if (nextSnapshotInfo.getSnapshotStatus().equals( - SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE)) { - return nextSnapshotInfo; - } - - snapInfo = nextSnapshotInfo; + UUID nextPathSnapshot = chainManager.nextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); + return getSnapshotInfo(ozoneManager, chainManager, nextPathSnapshot); } } catch (NoSuchElementException ex) { LOG.error("The snapshot {} is not longer in snapshot chain, It " + @@ -180,6 +178,41 @@ public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, return null; } + /** + * Get the previous snapshot in the snapshot chain. + */ + public static SnapshotInfo getPreviousSnapshot(OzoneManager ozoneManager, + SnapshotChainManager chainManager, + SnapshotInfo snapInfo) + throws IOException { + UUID previousSnapshotId = getPreviousSnapshotId(snapInfo, chainManager); + return previousSnapshotId == null ? null : getSnapshotInfo(ozoneManager, chainManager, previousSnapshotId); + } + + /** + * Get the previous snapshot in the snapshot chain. 
+ */ + private static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainManager chainManager) + throws IOException { + // If the snapshot is deleted in the previous run, then the in-memory + // SnapshotChainManager might throw NoSuchElementException as the snapshot + // is removed in-memory but OMDoubleBuffer has not flushed yet. + if (snapInfo == null) { + throw new OMException("Provided Snapshot Info argument is null. Cannot get the previous snapshot for a null " + + "value", FILE_NOT_FOUND); + } + try { + if (chainManager.hasPreviousPathSnapshot(snapInfo.getSnapshotPath(), + snapInfo.getSnapshotId())) { + return chainManager.previousPathSnapshot(snapInfo.getSnapshotPath(), + snapInfo.getSnapshotId()); + } + } catch (NoSuchElementException ignored) { + + } + return null; + } + /** * Return a map column family to prefix for the keys in the table for * the given volume and bucket. @@ -209,7 +242,7 @@ public static Map getColumnFamilyToKeyPrefixMap( *

    * Note: Currently, this is only intended to be a special use case in * Snapshot. If this is used elsewhere, consider moving this to - * @link OMMetadataManager}. + * {@link OMMetadataManager}. * * @param volumeName volume name * @param bucketName bucket name @@ -244,4 +277,74 @@ public static String getOzonePathKeyForFso(OMMetadataManager metadataManager, final long bucketId = metadataManager.getBucketId(volumeName, bucketName); return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; } + + /** + * Returns merged repeatedKeyInfo entry with the existing deleted entry in the table. + * @param snapshotMoveKeyInfos keyInfos to be added. + * @param metadataManager metadataManager for a store. + * @return RepeatedOmKeyInfo + * @throws IOException + */ + public static RepeatedOmKeyInfo createMergedRepeatedOmKeyInfoFromDeletedTableEntry( + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos, OMMetadataManager metadataManager) throws + IOException { + String dbKey = snapshotMoveKeyInfos.getKey(); + List keyInfoList = new ArrayList<>(); + for (OzoneManagerProtocolProtos.KeyInfo info : snapshotMoveKeyInfos.getKeyInfosList()) { + OmKeyInfo fromProtobuf = OmKeyInfo.getFromProtobuf(info); + keyInfoList.add(fromProtobuf); + } + // When older version of keys are moved to the next snapshot's deletedTable + // The newer version might also be in the next snapshot's deletedTable and + // it might overwrite the existing value which inturn could lead to orphan block in the system. + // Checking the keyInfoList with the last n versions of the omKeyInfo versions would ensure all versions are + // present in the list and would also avoid redundant additions to the list if the last n versions match, which + // can happen on om transaction replay on snapshotted rocksdb. + RepeatedOmKeyInfo result = metadataManager.getDeletedTable().get(dbKey); + if (result == null) { + result = new RepeatedOmKeyInfo(keyInfoList); + } else if (!isSameAsLatestOmKeyInfo(keyInfoList, result)) { + keyInfoList.forEach(result::addOmKeyInfo); + } + return result; + } + + private static boolean isSameAsLatestOmKeyInfo(List omKeyInfos, + RepeatedOmKeyInfo result) { + int size = result.getOmKeyInfoList().size(); + if (size >= omKeyInfos.size()) { + return omKeyInfos.equals(result.getOmKeyInfoList().subList(size - omKeyInfos.size(), size)); + } + return false; + } + + public static SnapshotInfo getLatestSnapshotInfo(String volumeName, String bucketName, + OzoneManager ozoneManager, + SnapshotChainManager snapshotChainManager) throws IOException { + Optional latestPathSnapshot = Optional.ofNullable( + getLatestPathSnapshotId(volumeName, bucketName, snapshotChainManager)); + return latestPathSnapshot.isPresent() ? + getSnapshotInfo(ozoneManager, snapshotChainManager, latestPathSnapshot.get()) : null; + } + + public static UUID getLatestPathSnapshotId(String volumeName, String bucketName, + SnapshotChainManager snapshotChainManager) throws IOException { + String snapshotPath = volumeName + OM_KEY_PREFIX + bucketName; + return snapshotChainManager.getLatestPathSnapshotId(snapshotPath); + } + + // Validates the previous path snapshotId for given a snapshotInfo. In case snapshotInfo is + // null, the snapshotInfo would be considered as AOS and previous snapshot becomes the latest snapshot in the global + // snapshot chain. Would throw OMException if validation fails otherwise function would pass. 
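A hedged usage sketch for the validation helper declared just below (the wrapper name and its caller are hypothetical, not part of this patch): a request handler that carries the previous snapshot id observed by the sender can delegate the staleness check to validatePreviousSnapshotId and let it reject the request.

  // Illustrative caller only; 'expectedPreviousSnapshotId' would come from the incoming request.
  static void rejectIfChainAdvanced(SnapshotInfo fromSnapshot, SnapshotChainManager chainManager,
      UUID expectedPreviousSnapshotId) throws IOException {
    // A null fromSnapshot means the request targets the active object store (AOS); the helper then
    // compares against the latest global snapshot id in the chain.
    SnapshotUtils.validatePreviousSnapshotId(fromSnapshot, chainManager, expectedPreviousSnapshotId);
    // On mismatch the helper throws OMException with ResultCodes.INVALID_REQUEST.
  }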
+ public static void validatePreviousSnapshotId(SnapshotInfo snapshotInfo, + SnapshotChainManager snapshotChainManager, + UUID expectedPreviousSnapshotId) throws IOException { + UUID previousSnapshotId = snapshotInfo == null ? snapshotChainManager.getLatestGlobalSnapshotId() : + SnapshotUtils.getPreviousSnapshotId(snapshotInfo, snapshotChainManager); + if (!Objects.equals(expectedPreviousSnapshotId, previousSnapshotId)) { + throw new OMException("Snapshot validation failed. Expected previous snapshotId : " + + expectedPreviousSnapshotId + " but was " + previousSnapshotId, + OMException.ResultCodes.INVALID_REQUEST); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java index 7cdff8f5c11..e5d9901fda1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java @@ -40,12 +40,14 @@ public enum OMLayoutFeature implements LayoutFeature { MULTITENANCY_SCHEMA(3, "Multi-Tenancy Schema"), + @Deprecated HSYNC(4, "Support hsync"), FILESYSTEM_SNAPSHOT(5, "Ozone version supporting snapshot"), QUOTA(6, "Ozone quota re-calculate"), - HBASE_SUPPORT(7, "Full support of hsync, lease recovery and listOpenFiles APIs for HBase"); + HBASE_SUPPORT(7, "Full support of hsync, lease recovery and listOpenFiles APIs for HBase"), + DELEGATION_TOKEN_SYMMETRIC_SIGN(8, "Delegation token signed by symmetric key"); /////////////////////////////// ///////////////////////////// // Example OM Layout Feature with Actions diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 4506337e54d..6b55b7384bd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -176,9 +176,7 @@ public OMResponse processRequest(OMRequest request) throws ServiceException { return response; } - private OMResponse internalProcessRequest(OMRequest request) throws - ServiceException { - OMClientRequest omClientRequest = null; + private OMResponse internalProcessRequest(OMRequest request) throws ServiceException { boolean s3Auth = false; try { @@ -207,7 +205,16 @@ private OMResponse internalProcessRequest(OMRequest request) throws if (!s3Auth) { OzoneManagerRatisUtils.checkLeaderStatus(ozoneManager); } - OMRequest requestToSubmit; + + // check retry cache + final OMResponse cached = omRatisServer.checkRetryCache(); + if (cached != null) { + return cached; + } + + // process new request + OMClientRequest omClientRequest = null; + final OMRequest requestToSubmit; try { omClientRequest = createClientRequest(request, ozoneManager); // TODO: Note: Due to HDDS-6055, createClientRequest() could now @@ -215,6 +222,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws // Added the assertion. 
assert (omClientRequest != null); OMClientRequest finalOmClientRequest = omClientRequest; + requestToSubmit = preExecute(finalOmClientRequest); this.lastRequestToSubmit = requestToSubmit; } catch (IOException ex) { @@ -225,7 +233,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws return createErrorResponse(request, ex); } - OMResponse response = submitRequestToRatis(requestToSubmit); + final OMResponse response = omRatisServer.submitRequest(requestToSubmit); if (!response.getSuccess()) { omClientRequest.handleRequestFailure(ozoneManager); } @@ -246,14 +254,6 @@ public OMRequest getLastRequestToSubmit() { return lastRequestToSubmit; } - /** - * Submits request to OM's Ratis server. - */ - private OMResponse submitRequestToRatis(OMRequest request) - throws ServiceException { - return omRatisServer.submitRequest(request); - } - private OMResponse submitReadRequestToOM(OMRequest request) throws ServiceException { // Check if this OM is the leader. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index a5e94689aee..ab1f68d9928 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.UUID; import java.util.stream.Collectors; @@ -42,6 +43,9 @@ import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OMAuditLogger; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingResponse; import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; @@ -63,7 +67,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; @@ -113,8 +116,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyRequest; @@ -133,7 +134,6 @@ import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffResponse; @@ -146,6 +146,8 @@ import com.google.common.collect.Lists; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.HBASE_SUPPORT; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; @@ -181,9 +183,16 @@ public class OzoneManagerRequestHandler implements RequestHandler { LoggerFactory.getLogger(OzoneManagerRequestHandler.class); private final OzoneManager impl; private FaultInjector injector; + private long maxKeyListSize; + public OzoneManagerRequestHandler(OzoneManager om) { this.impl = om; + this.maxKeyListSize = om.getConfiguration().getLong(OZONE_OM_SERVER_LIST_MAX_SIZE, + OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT); + if (this.maxKeyListSize <= 0) { + this.maxKeyListSize = OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT; + } } //TODO simplify it to make it shorter @@ -238,11 +247,6 @@ public OMResponse handleReadRequest(OMRequest request) { request.getListKeysRequest()); responseBuilder.setListKeysLightResponse(listKeysLightResponse); break; - case ListTrash: - ListTrashResponse listTrashResponse = listTrash( - request.getListTrashRequest(), request.getVersion()); - responseBuilder.setListTrashResponse(listTrashResponse); - break; case ListMultiPartUploadParts: MultipartUploadListPartsResponse listPartsResponse = listParts(request.getListMultipartUploadPartsRequest()); @@ -384,11 +388,20 @@ public OMResponse handleReadRequest(OMRequest request) { getSnapshotInfo(request.getSnapshotInfoRequest()); responseBuilder.setSnapshotInfoResponse(snapshotInfoResponse); break; - case GetServerDefaults: - responseBuilder.setServerDefaultsResponse( - ServerDefaultsResponse.newBuilder() - .setServerDefaults(impl.getServerDefaults().getProtobuf()) - .build()); + case GetQuotaRepairStatus: + OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse quotaRepairStatusRsp = + getQuotaRepairStatus(request.getGetQuotaRepairStatusRequest()); + responseBuilder.setGetQuotaRepairStatusResponse(quotaRepairStatusRsp); + break; + case StartQuotaRepair: + OzoneManagerProtocolProtos.StartQuotaRepairResponse startQuotaRepairRsp = + startQuotaRepair(request.getStartQuotaRepairRequest()); + responseBuilder.setStartQuotaRepairResponse(startQuotaRepairRsp); + break; + case GetObjectTagging: + OzoneManagerProtocolProtos.GetObjectTaggingResponse getObjectTaggingResponse = + getObjectTagging(request.getGetObjectTaggingRequest()); + responseBuilder.setGetObjectTaggingResponse(getObjectTaggingResponse); break; default: responseBuilder.setSuccess(false); @@ -741,7 +754,7 @@ private ListKeysResponse listKeys(ListKeysRequest request, int clientVersion) request.getBucketName(), request.getStartKey(), 
request.getPrefix(), - request.getCount()); + (int)Math.min(this.maxKeyListSize, request.getCount())); for (OmKeyInfo key : listKeysResult.getKeys()) { resp.addKeyInfo(key.getProtobuf(true, clientVersion)); } @@ -759,7 +772,7 @@ private ListKeysLightResponse listKeysLight(ListKeysRequest request) request.getBucketName(), request.getStartKey(), request.getPrefix(), - request.getCount()); + (int)Math.min(this.maxKeyListSize, request.getCount())); for (BasicOmKeyInfo key : listKeysLightResult.getKeys()) { resp.addBasicKeyInfo(key.getProtobuf()); } @@ -835,26 +848,6 @@ public static OMResponse disallowListKeysWithBucketLayout( return resp; } - private ListTrashResponse listTrash(ListTrashRequest request, - int clientVersion) throws IOException { - - ListTrashResponse.Builder resp = - ListTrashResponse.newBuilder(); - - List deletedKeys = impl.listTrash( - request.getVolumeName(), - request.getBucketName(), - request.getStartKeyName(), - request.getKeyPrefix(), - request.getMaxKeys()); - - for (RepeatedOmKeyInfo key: deletedKeys) { - resp.addDeletedKeys(key.getProto(false, clientVersion)); - } - - return resp.build(); - } - @RequestFeatureValidator( conditions = ValidationCondition.OLDER_CLIENT_REQUESTS, processingPhase = RequestProcessingPhase.POST_PROCESS, @@ -1250,7 +1243,7 @@ private ListStatusResponse listStatus( request.hasAllowPartialPrefix() && request.getAllowPartialPrefix(); List statuses = impl.listStatus(omKeyArgs, request.getRecursive(), - request.getStartKey(), request.getNumEntries(), + request.getStartKey(), Math.min(this.maxKeyListSize, request.getNumEntries()), allowPartialPrefixes); ListStatusResponse.Builder listStatusResponseBuilder = @@ -1276,7 +1269,7 @@ private ListStatusLightResponse listStatusLight( request.hasAllowPartialPrefix() && request.getAllowPartialPrefix(); List statuses = impl.listStatusLight(omKeyArgs, request.getRecursive(), - request.getStartKey(), request.getNumEntries(), + request.getStartKey(), Math.min(this.maxKeyListSize, request.getNumEntries()), allowPartialPrefixes); ListStatusLightResponse.Builder listStatusLightResponseBuilder = @@ -1504,7 +1497,7 @@ private OzoneManagerProtocolProtos.ListSnapshotResponse getSnapshots( throws IOException { ListSnapshotResponse implResponse = impl.listSnapshot( request.getVolumeName(), request.getBucketName(), request.getPrefix(), - request.getPrevSnapshot(), request.getMaxListResult()); + request.getPrevSnapshot(), (int)Math.min(request.getMaxListResult(), maxKeyListSize)); List snapshotInfoList = implResponse.getSnapshotInfos() .stream().map(SnapshotInfo::getProtobuf).collect(Collectors.toList()); @@ -1533,6 +1526,24 @@ private SetSafeModeResponse setSafeMode( .build(); } + private GetObjectTaggingResponse getObjectTagging(GetObjectTaggingRequest request) + throws IOException { + KeyArgs keyArgs = request.getKeyArgs(); + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(keyArgs.getKeyName()) + .build(); + + GetObjectTaggingResponse.Builder resp = + GetObjectTaggingResponse.newBuilder(); + + Map result = impl.getObjectTagging(omKeyArgs); + + resp.addAllTags(KeyValueUtil.toProtobuf(result)); + return resp.build(); + } + private SafeModeAction toSafeModeAction( OzoneManagerProtocolProtos.SafeMode safeMode) { switch (safeMode) { @@ -1549,4 +1560,16 @@ private SafeModeAction toSafeModeAction( safeMode); } } + + private OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse getQuotaRepairStatus( + 
OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest req) throws IOException { + return OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse.newBuilder() + .setStatus(impl.getQuotaRepairStatus()) + .build(); + } + private OzoneManagerProtocolProtos.StartQuotaRepairResponse startQuotaRepair( + OzoneManagerProtocolProtos.StartQuotaRepairRequest req) throws IOException { + impl.startQuotaRepair(req.getBucketsList()); + return OzoneManagerProtocolProtos.StartQuotaRepairResponse.newBuilder().build(); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java index e60362a1ebb..76546f2e480 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java @@ -68,7 +68,7 @@ default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termI } /** - * Implementation of {@link #handleWriteRequest(OMRequest, TermIndex, OzoneManagerDoubleBuffer)}. + * Implementation of {@link #handleWriteRequest}. * * @param omRequest the write request * @param termIndex - ratis transaction term and index diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index a6fe61eb480..420cb6c6dcb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -25,13 +25,19 @@ import java.security.cert.X509Certificate; import java.util.Iterator; import java.util.Map; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.OzoneSecretManager; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.exception.SCMSecurityException; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; import org.apache.hadoop.io.Text; @@ -41,6 +47,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.security.OzoneSecretStore.OzoneManagerSecretState; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier.TokenInfo; import org.apache.hadoop.security.AccessControlException; @@ -64,7 +71,7 @@ public class OzoneDelegationTokenSecretManager extends OzoneSecretManager { - private static final Logger LOG = LoggerFactory + public static final Logger LOG = LoggerFactory .getLogger(OzoneDelegationTokenSecretManager.class); private final Map 
currentTokens; private final OzoneSecretStore store; @@ -73,6 +80,7 @@ public class OzoneDelegationTokenSecretManager private final long tokenRemoverScanInterval; private final String omServiceId; private final OzoneManager ozoneManager; + private SecretKeyClient secretKeyClient; /** * If the delegation token update thread holds this lock, it will not get @@ -100,8 +108,8 @@ public OzoneDelegationTokenSecretManager(Builder b) throws IOException { isRatisEnabled = b.ozoneConf.getBoolean( OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + this.secretKeyClient = b.secretKeyClient; loadTokenSecretState(store.loadState()); - } /** @@ -117,6 +125,7 @@ public static class Builder { private CertificateClient certClient; private String omServiceId; private OzoneManager ozoneManager; + private SecretKeyClient secretKeyClient; public OzoneDelegationTokenSecretManager build() throws IOException { return new OzoneDelegationTokenSecretManager(this); @@ -157,6 +166,11 @@ public Builder setCertificateClient(CertificateClient certificateClient) { return this; } + public Builder setSecretKeyClient(SecretKeyClient client) { + this.secretKeyClient = client; + return this; + } + public Builder setOmServiceId(String serviceId) { this.omServiceId = serviceId; return this; @@ -195,9 +209,15 @@ public Token createToken(Text owner, Text renewer, OzoneTokenIdentifier identifier = createIdentifier(owner, renewer, realUser); updateIdentifierDetails(identifier); - - byte[] password = createPassword(identifier.getBytes(), - getCurrentKey().getPrivateKey()); + byte[] password; + if (ozoneManager.getVersionManager().isAllowed(OMLayoutFeature.DELEGATION_TOKEN_SYMMETRIC_SIGN)) { + ManagedSecretKey currentSecretKey = secretKeyClient.getCurrentSecretKey(); + identifier.setSecretKeyId(currentSecretKey.getId().toString()); + password = currentSecretKey.sign(identifier.getBytes()); + } else { + identifier.setOmCertSerialId(getCertSerialId()); + password = createPassword(identifier.getBytes(), getCurrentKey().getPrivateKey()); + } long expiryTime = identifier.getIssueDate() + getTokenRenewInterval(); // For HA ratis will take care of updating. @@ -252,7 +272,6 @@ private void updateIdentifierDetails(OzoneTokenIdentifier identifier) { identifier.setMasterKeyId(getCurrentKey().getKeyId()); identifier.setSequenceNumber(sequenceNum); identifier.setMaxDate(now + getTokenMaxLifetime()); - identifier.setOmCertSerialId(getCertSerialId()); identifier.setOmServiceId(getOmServiceId()); } @@ -433,9 +452,29 @@ private TokenInfo validateToken(OzoneTokenIdentifier identifier) /** * Validates if given hash is valid. + * HDDS-8829 changes delegation token signing from the OM's RSA private key to a symmetric secret key managed by SCM. + * The default delegation token lifetime is 7 days. + * In the 7-day period after OM is upgraded from a version without HDDS-8829 to a version with HDDS-8829, tokens + * signed by the RSA private key and tokens signed by the secret key coexist. After 7 days, only tokens + * signed by the secret key remain valid. The following logic handles both types of tokens.
*/ public boolean verifySignature(OzoneTokenIdentifier identifier, byte[] password) { + String secretKeyId = identifier.getSecretKeyId(); + if (StringUtils.isNotEmpty(secretKeyId)) { + try { + ManagedSecretKey verifyKey = secretKeyClient.getSecretKey(UUID.fromString(secretKeyId)); + return verifyKey.isValidSignature(identifier.getBytes(), password); + } catch (SCMSecurityException e) { + LOG.error("verifySignature for identifier {} failed", identifier, e); + return false; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Verify an asymmetric key signed Token {}", identifier); + } + X509Certificate signerCert; try { signerCert = getCertClient().getCertificate( @@ -511,6 +550,14 @@ private byte[] validateS3AuthInfo(OzoneTokenIdentifier identifier) } + /** + * Load delegation tokens from the DB into memory. + * HDDS-8829 changes delegation token signing from the OM's RSA private key to a symmetric secret key managed by SCM. + * The default delegation token lifetime is 7 days. After OM is upgraded from a version without HDDS-8829 to + * a version with HDDS-8829 and restarts, tokens signed by the RSA private key are still loaded from the DB into memory. + * On a later restart, more than 7 days after the upgrade, only tokens signed by the secret key remain to be loaded. + * Both types of token loading must be supported. + */ private void loadTokenSecretState( OzoneManagerSecretState state) throws IOException { LOG.info("Loading token state into token manager."); @@ -528,8 +575,17 @@ private void addPersistedDelegationToken(OzoneTokenIdentifier identifier, "Can't add persisted delegation token to a running SecretManager."); } - byte[] password = createPassword(identifier.getBytes(), - getCertClient().getPrivateKey()); + byte[] password; + if (StringUtils.isNotEmpty(identifier.getSecretKeyId())) { + ManagedSecretKey signKey = secretKeyClient.getSecretKey(UUID.fromString(identifier.getSecretKeyId())); + password = signKey.sign(identifier.getBytes()); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Load an asymmetric key signed Token {}", identifier); + } + password = createPassword(identifier.getBytes(), getCertClient().getPrivateKey()); + } + if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) { setDelegationTokenSeqNum(identifier.getSequenceNumber()); } @@ -588,6 +644,11 @@ public void stop() throws IOException { } } + @VisibleForTesting + public void setSecretKeyClient(SecretKeyClient client) { + this.secretKeyClient = client; + } + /** * Remove expired delegation tokens from cache and persisted store.
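To summarize the two verification paths described in the comments above, here is a minimal sketch under simplified assumptions; Identifier and SymmetricKey are hypothetical placeholders, not Ozone classes. A token carrying a secret-key id is checked against the SCM-managed symmetric key that signed it, while a token without one falls back to the pre-existing certificate (RSA) check.

import java.util.Map;
import java.util.UUID;

// Hypothetical sketch of the dual verification path; not Ozone code.
final class TokenVerificationSketch {
  interface Identifier {
    String getSecretKeyId();   // empty/null for tokens issued before the upgrade
    byte[] getBytes();
  }

  interface SymmetricKey {
    boolean isValidSignature(byte[] data, byte[] signature);
  }

  private final Map<UUID, SymmetricKey> secretKeys;

  TokenVerificationSketch(Map<UUID, SymmetricKey> secretKeys) {
    this.secretKeys = secretKeys;
  }

  boolean verify(Identifier identifier, byte[] password) {
    String secretKeyId = identifier.getSecretKeyId();
    if (secretKeyId != null && !secretKeyId.isEmpty()) {
      // Post-upgrade token: verify with the symmetric key named in the identifier.
      SymmetricKey key = secretKeys.get(UUID.fromString(secretKeyId));
      return key != null && key.isValidSignature(identifier.getBytes(), password);
    }
    // Pre-upgrade token: fall back to the certificate/RSA-based check.
    return verifyWithSignerCertificate(identifier, password);
  }

  private boolean verifyWithSignerCertificate(Identifier identifier, byte[] password) {
    return false; // placeholder for the existing certificate path
  }
}

Once the 7-day token lifetime has elapsed after the upgrade only the first branch is exercised, but the fallback has to remain until no pre-upgrade tokens can still be presented.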
*/ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java index edffd5ed74e..c7a14bb6eed 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; @@ -52,6 +53,7 @@ public final class OmTestManagers { private final BucketManager bucketManager; private final PrefixManager prefixManager; private final ScmBlockLocationProtocol scmBlockClient; + private final OzoneClient rpcClient; public OzoneManager getOzoneManager() { return om; @@ -77,6 +79,9 @@ public KeyManager getKeyManager() { public ScmBlockLocationProtocol getScmBlockClient() { return scmBlockClient; } + public OzoneClient getRpcClient() { + return rpcClient; + } public OmTestManagers(OzoneConfiguration conf) throws AuthenticationException, IOException, InterruptedException, TimeoutException { @@ -121,7 +126,8 @@ public OmTestManagers(OzoneConfiguration conf, waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY, 10, 10_000); - writeClient = OzoneClientFactory.getRpcClient(conf) + rpcClient = OzoneClientFactory.getRpcClient(conf); + writeClient = rpcClient .getObjectStore().getClientProxy().getOzoneManagerClient(); metadataManager = (OmMetadataManagerImpl) HddsWhiteboxTestUtils .getInternalState(om, "metadataManager"); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java index 36245dc8741..680853cdc32 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java @@ -27,13 +27,14 @@ import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; +import java.util.List; /** - * Test that all the tables are covered both by OMDBDefinition - * as well as OmMetadataManagerImpl. + * Test that all the tables are covered both by OMDBDefinition and OmMetadataManagerImpl. 
*/ public class TestOMDBDefinition { @@ -41,33 +42,33 @@ public class TestOMDBDefinition { private Path folder; @Test - public void testDBDefinition() throws Exception { + public void testDBDefinition() throws IOException { OzoneConfiguration configuration = new OzoneConfiguration(); File metaDir = folder.toFile(); - DBStore store = OmMetadataManagerImpl.loadDB(configuration, metaDir); - OMDBDefinition dbDef = new OMDBDefinition(); + OMDBDefinition dbDef = OMDBDefinition.get(); // Get list of tables from DB Definitions - final Collection> columnFamilyDefinitions - = dbDef.getColumnFamilies(); + final Collection> columnFamilyDefinitions = dbDef.getColumnFamilies(); final int countOmDefTables = columnFamilyDefinitions.size(); - ArrayList missingDBDefTables = new ArrayList<>(); + List missingDBDefTables = new ArrayList<>(); - // Get list of tables from the RocksDB Store - final Collection missingOmDBTables = new ArrayList<>(store.getTableNames().values()); - missingOmDBTables.remove("default"); - int countOmDBTables = missingOmDBTables.size(); - // Remove the file if it is found in both the datastructures - for (DBColumnFamilyDefinition definition : columnFamilyDefinitions) { - if (!missingOmDBTables.remove(definition.getName())) { - missingDBDefTables.add(definition.getName()); + try (DBStore store = OmMetadataManagerImpl.loadDB(configuration, metaDir, -1)) { + // Get list of tables from the RocksDB Store + final Collection missingOmDBTables = new ArrayList<>(store.getTableNames().values()); + missingOmDBTables.remove("default"); + int countOmDBTables = missingOmDBTables.size(); + // Remove the file if it is found in both the datastructures + for (DBColumnFamilyDefinition definition : columnFamilyDefinitions) { + if (!missingOmDBTables.remove(definition.getName())) { + missingDBDefTables.add(definition.getName()); + } } - } - assertEquals(0, missingDBDefTables.size(), - "Tables in OmMetadataManagerImpl are:" + missingDBDefTables); - assertEquals(0, missingOmDBTables.size(), - "Tables missing in OMDBDefinition are:" + missingOmDBTables); - assertEquals(countOmDBTables, countOmDefTables); + assertEquals(0, missingDBDefTables.size(), + "Tables in OmMetadataManagerImpl are:" + missingDBDefTables); + assertEquals(0, missingOmDBTables.size(), + "Tables missing in OMDBDefinition are:" + missingOmDBTables); + assertEquals(countOmDBTables, countOmDefTables); + } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java index a4ced424522..57ac3f29078 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java @@ -131,7 +131,8 @@ public void testMultiTenancyRequestsWhenDisabled() throws IOException { final OzoneManager ozoneManager = mock(OzoneManager.class); doCallRealMethod().when(ozoneManager).checkS3MultiTenancyEnabled(); - + final OzoneConfiguration conf = new OzoneConfiguration(); + when(ozoneManager.getConfiguration()).thenReturn(conf); when(ozoneManager.isS3MultiTenancyEnabled()).thenReturn(false); final String tenantId = "test-tenant"; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index ebc6bc6cb6c..1d00ec614cd 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -72,6 +73,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; @@ -314,6 +316,29 @@ public void testHardLinkCreation() throws IOException { getINode(f1FileLink.toPath()), "link matches original file"); } + + @Test + public void testGetSnapshotInfo() throws IOException { + SnapshotInfo s1 = createSnapshotInfo("vol", "buck"); + UUID latestGlobalSnapId = + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager() + .getLatestGlobalSnapshotId(); + UUID latestPathSnapId = + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager() + .getLatestPathSnapshotId(String.join("/", "vol", "buck")); + s1.setPathPreviousSnapshotId(latestPathSnapId); + s1.setGlobalPreviousSnapshotId(latestGlobalSnapId); + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager() + .addSnapshot(s1); + OMException ome = assertThrows(OMException.class, + () -> om.getOmSnapshotManager().getSnapshot(s1.getSnapshotId())); + assertEquals(OMException.ResultCodes.FILE_NOT_FOUND, ome.getResult()); + // not present in snapshot chain too + SnapshotInfo s2 = createSnapshotInfo("vol", "buck"); + ome = assertThrows(OMException.class, + () -> om.getOmSnapshotManager().getSnapshot(s2.getSnapshotId())); + assertEquals(OMException.ResultCodes.FILE_NOT_FOUND, ome.getResult()); + } /* * Test that exclude list is generated correctly. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java index 0bd99d49499..41d6c28e2b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java @@ -20,16 +20,10 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.io.IOException; -import java.util.Arrays; - -import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -119,42 +113,4 @@ public void testResolveClientSideRepConfigWhenBucketHasEC3() // should return ratis. 
assertEquals(ratisReplicationConfig, replicationConfig); } - - @Test - public void testS3AdminExtraction() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, "alice,bob"); - - assertThat(OzoneConfigUtil.getS3AdminsFromConfig(configuration)) - .containsAll(Arrays.asList("alice", "bob")); - } - - @Test - public void testS3AdminExtractionWithFallback() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, "alice,bob"); - - assertThat(OzoneConfigUtil.getS3AdminsFromConfig(configuration)) - .containsAll(Arrays.asList("alice", "bob")); - } - - @Test - public void testS3AdminGroupExtraction() { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, - "test1, test2"); - - assertThat(OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration)) - .containsAll(Arrays.asList("test1", "test2")); - } - - @Test - public void testS3AdminGroupExtractionWithFallback() { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, - "test1, test2"); - - assertThat(OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration)) - .containsAll(Arrays.asList("test1", "test2")); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java deleted file mode 100644 index 4f0c15f15e5..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.ozone.om; - - -import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.util.ExitUtils; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collections; - -/** - * Test Key Trash Service. - *

    - * This test does the things including: - * 1. UTs for list trash. - * 2. UTs for recover trash. - * 3. UTs for empty trash. - *

    - */ -public class TestTrashService { - - @TempDir - private Path tempFolder; - - private KeyManager keyManager; - private OzoneManagerProtocol writeClient; - private OzoneManager om; - private String volumeName; - private String bucketName; - - @BeforeEach - void setup() throws Exception { - ExitUtils.disableSystemExit(); - OzoneConfiguration configuration = new OzoneConfiguration(); - - File folder = tempFolder.toFile(); - if (!folder.exists()) { - assertTrue(folder.mkdirs()); - } - System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); - ServerUtils.setOzoneMetaDirPath(configuration, folder.toString()); - - OmTestManagers omTestManagers - = new OmTestManagers(configuration); - keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); - volumeName = "volume"; - bucketName = "bucket"; - } - - @AfterEach - public void cleanup() throws Exception { - om.stop(); - } - - @Test - public void testRecoverTrash() throws IOException { - String keyName = "testKey"; - String destinationBucket = "destBucket"; - createAndDeleteKey(keyName); - - boolean recoverOperation = keyManager.getMetadataManager() - .recoverTrash(volumeName, bucketName, keyName, destinationBucket); - assertTrue(recoverOperation); - } - - private void createAndDeleteKey(String keyName) throws IOException { - - OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), - OmVolumeArgs.newBuilder() - .setOwnerName("owner") - .setAdminName("admin") - .setVolume(volumeName) - .build()); - - OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), - OmBucketInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .build()); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setAcls(Collections.emptyList()) - .setLocationInfoList(new ArrayList<>()) - .setReplicationConfig(StandaloneReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.ONE)) - .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) - .build(); - - /* Create and delete key in the Key Manager. 
*/ - OpenKeySession session = writeClient.openKey(keyArgs); - writeClient.commitKey(keyArgs, session.getId()); - writeClient.deleteKey(keyArgs); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 125c9efcaf2..6e24c9ff93f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -44,9 +44,9 @@ import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; import org.apache.hadoop.ozone.om.s3.S3SecretCacheProvider; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosName; @@ -81,12 +81,12 @@ class TestOzoneManagerDoubleBuffer { private OzoneManagerDoubleBuffer doubleBuffer; private OzoneManager ozoneManager; private S3SecretLockedManager secretManager; - private final CreateSnapshotResponse snapshotResponse1 = mock(CreateSnapshotResponse.class); - private final CreateSnapshotResponse snapshotResponse2 = mock(CreateSnapshotResponse.class); private final OMResponse omKeyResponse = mock(OMResponse.class); private final OMResponse omBucketResponse = mock(OMResponse.class); private final OMResponse omSnapshotResponse1 = mock(OMResponse.class); private final OMResponse omSnapshotResponse2 = mock(OMResponse.class); + private final OMResponse omSnapshotPurgeResponseProto1 = mock(OMResponse.class); + private final OMResponse omSnapshotPurgeResponseProto2 = mock(OMResponse.class); private static OMClientResponse omKeyCreateResponse = mock(OMKeyCreateResponse.class); private static OMClientResponse omBucketCreateResponse = @@ -95,6 +95,9 @@ class TestOzoneManagerDoubleBuffer { mock(OMSnapshotCreateResponse.class); private static OMClientResponse omSnapshotCreateResponse2 = mock(OMSnapshotCreateResponse.class); + private static OMClientResponse omSnapshotPurgeResponse1 = mock(OMSnapshotPurgeResponse.class); + private static OMClientResponse omSnapshotPurgeResponse2 = mock(OMSnapshotPurgeResponse.class); + @TempDir private File tempDir; private OzoneManagerDoubleBuffer.FlushNotifier flushNotifier; @@ -143,19 +146,22 @@ public void setup() throws IOException { doNothing().when(omBucketCreateResponse).checkAndUpdateDB(any(), any()); doNothing().when(omSnapshotCreateResponse1).checkAndUpdateDB(any(), any()); doNothing().when(omSnapshotCreateResponse2).checkAndUpdateDB(any(), any()); + doNothing().when(omSnapshotPurgeResponse1).checkAndUpdateDB(any(), any()); + doNothing().when(omSnapshotPurgeResponse2).checkAndUpdateDB(any(), any()); when(omKeyResponse.getTraceID()).thenReturn("keyTraceId"); when(omBucketResponse.getTraceID()).thenReturn("bucketTraceId"); when(omSnapshotResponse1.getTraceID()).thenReturn("snapshotTraceId-1"); 
when(omSnapshotResponse2.getTraceID()).thenReturn("snapshotTraceId-2"); - when(omSnapshotResponse1.hasCreateSnapshotResponse()) - .thenReturn(true); - when(omSnapshotResponse2.hasCreateSnapshotResponse()) - .thenReturn(true); - when(omSnapshotResponse1.getCreateSnapshotResponse()) - .thenReturn(snapshotResponse1); - when(omSnapshotResponse2.getCreateSnapshotResponse()) - .thenReturn(snapshotResponse2); + when(omSnapshotPurgeResponseProto1.getTraceID()).thenReturn("snapshotPurgeTraceId-1"); + when(omSnapshotPurgeResponseProto2.getTraceID()).thenReturn("snapshotPurgeTraceId-2"); + + when(omKeyResponse.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.CreateKey); + when(omBucketResponse.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.CreateBucket); + when(omSnapshotPurgeResponseProto1.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); + when(omSnapshotPurgeResponseProto2.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); + when(omSnapshotResponse1.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); + when(omSnapshotResponse2.getCmdType()).thenReturn(OzoneManagerProtocolProtos.Type.SnapshotPurge); when(omKeyCreateResponse.getOMResponse()).thenReturn(omKeyResponse); when(omBucketCreateResponse.getOMResponse()).thenReturn(omBucketResponse); @@ -163,6 +169,10 @@ public void setup() throws IOException { .thenReturn(omSnapshotResponse1); when(omSnapshotCreateResponse2.getOMResponse()) .thenReturn(omSnapshotResponse2); + when(omSnapshotPurgeResponse1.getOMResponse()) + .thenReturn(omSnapshotPurgeResponseProto1); + when(omSnapshotPurgeResponse2.getOMResponse()) + .thenReturn(omSnapshotPurgeResponseProto2); } @AfterEach @@ -194,8 +204,35 @@ private static Stream doubleBufferFlushCases() { omSnapshotCreateResponse1, omSnapshotCreateResponse2, omBucketCreateResponse), - 4L, 4L, 14L, 16L, 1L, 1.142F) - ); + 4L, 4L, 14L, 16L, 1L, 1.142F), + Arguments.of(Arrays.asList(omSnapshotPurgeResponse1, + omSnapshotPurgeResponse2), + 2L, 2L, 16L, 18L, 1L, 1.125F), + Arguments.of(Arrays.asList(omKeyCreateResponse, + omBucketCreateResponse, + omSnapshotPurgeResponse1, + omSnapshotPurgeResponse2), + 3L, 4L, 19L, 22L, 2L, 1.157F), + Arguments.of(Arrays.asList(omKeyCreateResponse, + omSnapshotPurgeResponse1, + omBucketCreateResponse, + omSnapshotPurgeResponse2), + 4L, 4L, 23L, 26L, 1L, 1.1300F), + Arguments.of(Arrays.asList(omKeyCreateResponse, + omSnapshotPurgeResponse1, + omSnapshotPurgeResponse2, + omBucketCreateResponse), + 4L, 4L, 27L, 30L, 1L, 1.111F), + Arguments.of(Arrays.asList(omKeyCreateResponse, + omBucketCreateResponse, + omSnapshotPurgeResponse1, + omSnapshotCreateResponse1, + omSnapshotPurgeResponse2, + omBucketCreateResponse, + omSnapshotCreateResponse2), + 6L, 7L, 33L, 37L, 2L, 1.121F) + + ); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 54b04260d55..eb13f97d237 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -107,6 +108,7 @@ public void setup() throws IOException { when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); auditLogger = mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) @@ -450,6 +452,11 @@ private OMClientResponse createVolume(String volumeName, OMVolumeCreateRequest omVolumeCreateRequest = new OMVolumeCreateRequest(omRequest); + try { + omVolumeCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); + } catch (IOException e) { + throw new RuntimeException(e); + } final TermIndex termIndex = TransactionInfo.getTermIndex(transactionId); OMClientResponse omClientResponse = omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, termIndex); @@ -462,7 +469,7 @@ private OMClientResponse createVolume(String volumeName, * @return OMBucketCreateResponse */ private OMBucketCreateResponse createBucket(String volumeName, - String bucketName, long transactionID) { + String bucketName, long transactionID) { BucketInfo.Builder bucketInfo = newBucketInfoBuilder(bucketName, volumeName) @@ -472,6 +479,10 @@ private OMBucketCreateResponse createBucket(String volumeName, OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(omRequest); + try { + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); + } catch (IOException e) { + } final TermIndex termIndex = TermIndex.valueOf(term, transactionID); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, termIndex); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index c807c04688d..eff23a18e6e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -30,9 +30,11 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.stream.Collectors; import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -40,6 +42,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfigValidator; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.ClientVersion; @@ -109,6 +112,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.logging.log4j.util.Strings; import static org.mockito.Mockito.any; import static 
org.mockito.Mockito.doCallRealMethod; @@ -749,17 +753,17 @@ public static OMRequest.Builder newCreateBucketRequest( .setClientId(UUID.randomUUID().toString()); } - public static List< HddsProtos.KeyValue> getMetadataList() { - List metadataList = new ArrayList<>(); - metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue( + public static List< KeyValue> getMetadataList() { + List metadataList = new ArrayList<>(); + metadataList.add(KeyValue.newBuilder().setKey("key1").setValue( "value1").build()); - metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue( + metadataList.add(KeyValue.newBuilder().setKey("key2").setValue( "value2").build()); return metadataList; } - public static HddsProtos.KeyValue fsoMetadata() { - return HddsProtos.KeyValue.newBuilder() + public static KeyValue fsoMetadata() { + return KeyValue.newBuilder() .setKey(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS) .setValue(Boolean.FALSE.toString()) .build(); @@ -1050,7 +1054,7 @@ public static OMRequest createCommitPartMPURequest(String volumeName, .setMultipartNumber(partNumber) .setMultipartUploadID(multipartUploadID) .addAllKeyLocations(new ArrayList<>()) - .addMetadata(HddsProtos.KeyValue.newBuilder() + .addMetadata(KeyValue.newBuilder() .setKey(OzoneConsts.ETAG) .setValue(DatatypeConverter.printHexBinary( new DigestInputStream( @@ -1321,6 +1325,69 @@ public static OMRequest createSnapshotRequest(String volumeName, .build(); } + public static OMRequest moveSnapshotTableKeyRequest(UUID snapshotId, + List>> deletedKeys, + List>> deletedDirs, + List> renameKeys) { + List deletedMoveKeys = new ArrayList<>(); + for (Pair> deletedKey : deletedKeys) { + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos = + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder() + .setKey(deletedKey.getKey()) + .addAllKeyInfos( + deletedKey.getValue().stream() + .map(omKeyInfo -> omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())) + .build(); + deletedMoveKeys.add(snapshotMoveKeyInfos); + } + + List deletedDirMoveKeys = new ArrayList<>(); + for (Pair> deletedKey : deletedDirs) { + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos = + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder() + .setKey(deletedKey.getKey()) + .addAllKeyInfos( + deletedKey.getValue().stream() + .map(omKeyInfo -> omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)) + .collect(Collectors.toList())) + .build(); + deletedDirMoveKeys.add(snapshotMoveKeyInfos); + } + + List renameKeyList = new ArrayList<>(); + for (Pair renameKey : renameKeys) { + KeyValue.Builder keyValue = KeyValue.newBuilder(); + keyValue.setKey(renameKey.getKey()); + if (!Strings.isBlank(renameKey.getValue())) { + keyValue.setValue(renameKey.getValue()); + } + renameKeyList.add(keyValue.build()); + } + + + OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest snapshotMoveTableKeysRequest = + OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest.newBuilder() + .setFromSnapshotID(HddsUtils.toProtobuf(snapshotId)) + .addAllDeletedKeys(deletedMoveKeys) + .addAllDeletedDirs(deletedDirMoveKeys) + .addAllRenamedKeys(renameKeyList) + .build(); + + OzoneManagerProtocolProtos.UserInfo userInfo = + OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName("user") + .setHostName("host") + .setRemoteAddress("remote-address") + .build(); + + return OMRequest.newBuilder() + .setSnapshotMoveTableKeysRequest(snapshotMoveTableKeysRequest) + .setCmdType(Type.SnapshotMoveTableKeys) + 
.setClientId(UUID.randomUUID().toString()) + .setUserInfo(userInfo) + .build(); + } + /** * Create OMRequest for Rename Snapshot. * diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java index bb3e3930059..c0f63e4d559 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java @@ -110,9 +110,9 @@ public void testGetRequestInstanceFromMap() { LOG.info("Validated request class instantiation for cmdType " + k); }); - assertEquals(13, omKeyReqsFSO.size()); - assertEquals(14, omKeyReqsLegacy.size()); - assertEquals(14, omKeyReqsOBS.size()); + assertEquals(15, omKeyReqsFSO.size()); + assertEquals(16, omKeyReqsLegacy.size()); + assertEquals(16, omKeyReqsOBS.size()); // Check if the number of instantiated OMKeyRequest classes is equal to // the number of keys in the mapping. assertEquals( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java index 201c2a759fc..59debe08a61 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -41,6 +42,7 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newCreateBucketRequest; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -336,7 +338,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, assertNull(omMetadataManager.getBucketTable().get(bucketKey)); OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1); @@ -355,8 +357,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, dbBucketInfo.getCreationTime()); assertEquals(bucketInfoFromProto.getModificationTime(), dbBucketInfo.getModificationTime()); - assertEquals(bucketInfoFromProto.getAcls(), - dbBucketInfo.getAcls()); + assertTrue(dbBucketInfo.getAcls().containsAll(bucketInfoFromProto.getAcls())); assertEquals(bucketInfoFromProto.getIsVersionEnabled(), dbBucketInfo.getIsVersionEnabled()); assertEquals(bucketInfoFromProto.getStorageType(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java index 3a1d22f08a8..029b1f9082b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -34,6 +35,7 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newCreateBucketRequest; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketLayoutProto.FILE_SYSTEM_OPTIMIZED; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -143,7 +145,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, assertNull(omMetadataManager.getBucketTable().get(bucketKey)); OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1); @@ -166,8 +168,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, dbBucketInfo.getCreationTime()); assertEquals(bucketInfoFromProto.getModificationTime(), dbBucketInfo.getModificationTime()); - assertEquals(bucketInfoFromProto.getAcls(), - dbBucketInfo.getAcls()); + assertTrue(dbBucketInfo.getAcls().containsAll(bucketInfoFromProto.getAcls())); assertEquals(bucketInfoFromProto.getIsVersionEnabled(), dbBucketInfo.getIsVersionEnabled()); assertEquals(bucketInfoFromProto.getStorageType(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 7af60c18d94..9df26293d0e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -68,6 +69,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static 
org.mockito.Mockito.doNothing; @@ -97,6 +99,7 @@ public void setup() throws Exception { folder.toAbsolutePath().toString()); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); AuditLogger auditLogger = mock(AuditLogger.class); @@ -182,7 +185,7 @@ public void testValidateAndUpdateCache() throws Exception { omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -221,6 +224,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceed() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -309,7 +313,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -429,6 +433,7 @@ public void testCreateDirectoryOMMetric() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -479,7 +484,7 @@ public void testCreateDirectoryInheritParentDefaultAcls() throws Exception { omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -509,7 +514,7 @@ private void verifyDirectoriesInheritAcls(String volumeName, List omKeyAcls = omKeyInfo.getAcls(); - assertEquals(expectedInheritAcls, omKeyAcls, "Failed to inherit parent acls!,"); + assertTrue(omKeyAcls.containsAll(expectedInheritAcls), "Failed to inherit parent acls!,"); prefix = dirName + OZONE_URI_DELIMITER; expectedInheritAcls = omKeyAcls; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index e0460ba81a9..fca7efba169 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import jakarta.annotation.Nonnull; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -97,6 +98,7 @@ public void 
setup() throws Exception { OMRequestTestUtils.configureFSOptimizedPaths(ozoneConfiguration, true); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); auditLogger = mock(AuditLogger.class); @@ -168,7 +170,7 @@ public void testValidateAndUpdateCache() throws Exception { omDirCreateRequestFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateRequestFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateRequestFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -208,6 +210,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() omDirCreateRequestFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateRequestFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateRequestFSO.validateAndUpdateCache(ozoneManager, 100L); assertSame(omClientResponse.getOMResponse().getStatus(), @@ -316,7 +319,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -569,6 +572,7 @@ public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -603,7 +607,7 @@ public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -642,6 +646,7 @@ public void testCreateDirectoryOMMetric() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -694,7 +699,7 @@ public void testCreateDirectoryInheritParentDefaultAcls() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); assertSame(omClientResponse.getOMResponse().getStatus(), @@ -729,7 +734,7 @@ private void verifyDirectoriesInheritAcls(List dirs, System.out.println( " subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 20da9d3e5dc..cdad3bcb18e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import jakarta.annotation.Nonnull; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -481,7 +482,7 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, System.out.println( " subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); @@ -513,9 +514,9 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, // Should inherit parent DEFAULT acls // [user:newUser:rw[ACCESS], group:newGroup:rwl[ACCESS]] - assertEquals(parentDefaultAcl.stream() + assertTrue(keyAcls.containsAll(parentDefaultAcl.stream() .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()), keyAcls, + .collect(Collectors.toList())), "Failed to inherit bucket DEFAULT acls!"); // Should not inherit parent ACCESS acls assertThat(keyAcls).doesNotContain(parentAccessAcl); @@ -529,7 +530,7 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, ".snapshot/a/b/keyName,Cannot create key under path reserved for snapshot: .snapshot/", ".snapshot,Cannot create key with reserved name: .snapshot"}) public void testPreExecuteWithInvalidKeyPrefix(String invalidKeyName, - String expectedErrorMessage) { + String expectedErrorMessage) throws IOException { OMRequest omRequest = createFileRequest(volumeName, bucketName, invalidKeyName, HddsProtos.ReplicationFactor.ONE, @@ -644,8 +645,10 @@ protected OMRequest createFileRequest( * @return OMFileCreateRequest reference */ @Nonnull - protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { - return new OMFileCreateRequest(omRequest, getBucketLayout()); + protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) throws IOException { + OMFileCreateRequest request = new OMFileCreateRequest(omRequest, getBucketLayout()); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index e988949c5b8..5a8c638141f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -28,9 +28,11 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.junit.jupiter.api.Test; +import java.io.IOException; 
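For reference, the recurring test setup in the hunks above (stubbing ozoneManager.getConfiguration() and injecting a UGI via setUGI before calling validateAndUpdateCache) can be written as a small factory helper. This is a minimal illustrative sketch, not part of the patch; it assumes a Mockito-mocked OzoneManager and uses only calls that appear in the diff (getConfiguration(), setUGI(UserGroupInformation.getCurrentUser()), and the two-argument OMFileCreateRequest constructor).

```java
import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.security.UserGroupInformation;

import static org.mockito.Mockito.when;

/** Illustrative sketch only; mirrors the factory helpers updated in the tests above. */
final class RequestFixtureSketch {

  private RequestFixtureSketch() {
  }

  static OMFileCreateRequest newFileCreateRequest(OzoneManager ozoneManager,
      OzoneConfiguration conf, OMRequest omRequest) throws IOException {
    // The request implementation now reads OM configuration during validation,
    // so the mocked OzoneManager must return a real OzoneConfiguration.
    when(ozoneManager.getConfiguration()).thenReturn(conf);

    OMFileCreateRequest request =
        new OMFileCreateRequest(omRequest, BucketLayout.DEFAULT);
    // The caller's UGI is injected explicitly instead of being resolved from the
    // RPC context, which is why the factory methods now declare "throws IOException".
    request.setUGI(UserGroupInformation.getCurrentUser());
    return request;
  }
}
```

Injecting the UGI explicitly keeps the request tests independent of any live RPC call context; the same pattern repeats across the directory, file-create, key-create, and multipart request tests in this patch.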
import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; @@ -236,9 +238,11 @@ private OmDirectoryInfo getDirInfo(String key) } @Override - protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { - return new OMFileCreateRequestWithFSO(omRequest, + protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) throws IOException { + OMFileCreateRequest request = new OMFileCreateRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index cbb782e184f..9eb8738b9d4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -26,18 +26,23 @@ import java.util.List; import java.util.UUID; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.key.OMDirectoriesPurgeResponseWithFSO; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import jakarta.annotation.Nonnull; @@ -109,7 +114,7 @@ private void updateBlockInfo(OmKeyInfo omKeyInfo) throws IOException { * Create OMRequest which encapsulates DeleteKeyRequest. 
* @return OMRequest */ - private OMRequest createPurgeKeysRequest(String purgeDeletedDir, + private OMRequest createPurgeKeysRequest(String fromSnapshot, String purgeDeletedDir, List keyList, OmBucketInfo bucketInfo) throws IOException { List purgePathRequestList = new ArrayList<>(); @@ -127,7 +132,9 @@ private OMRequest createPurgeKeysRequest(String purgeDeletedDir, OzoneManagerProtocolProtos.PurgeDirectoriesRequest.Builder purgeDirRequest = OzoneManagerProtocolProtos.PurgeDirectoriesRequest.newBuilder(); purgeDirRequest.addAllDeletedPath(purgePathRequestList); - + if (fromSnapshot != null) { + purgeDirRequest.setSnapshotTableKey(fromSnapshot); + } OzoneManagerProtocolProtos.OMRequest omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.PurgeDirectories) @@ -138,8 +145,7 @@ private OMRequest createPurgeKeysRequest(String purgeDeletedDir, } private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest( final long volumeId, final long bucketId, final String purgeDeletedDir, - final List purgeDeletedFiles, - final List markDirsAsDeleted) { + final List purgeDeletedFiles, final List markDirsAsDeleted) { // Put all keys to be purged in a list OzoneManagerProtocolProtos.PurgePathRequest.Builder purgePathsRequest = OzoneManagerProtocolProtos.PurgePathRequest.newBuilder(); @@ -182,13 +188,13 @@ public void testValidateAndUpdateCacheCheckQuota() throws Exception { // Create and Delete keys. The keys should be moved to DeletedKeys table List deletedKeyInfos = createAndDeleteKeys(1, null); // The keys should be present in the DeletedKeys table before purging - List deletedKeyNames = validateDeletedKeysTable(deletedKeyInfos); + List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); // Create PurgeKeysRequest to purge the deleted keys String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( bucketKey); - OMRequest omRequest = createPurgeKeysRequest( + OMRequest omRequest = createPurgeKeysRequest(null, null, deletedKeyInfos, omBucketInfo); OMRequest preExecutedRequest = preExecute(omRequest); OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = @@ -205,7 +211,59 @@ public void testValidateAndUpdateCacheCheckQuota() throws Exception { performBatchOperationCommit(omClientResponse); // The keys should exist in the DeletedKeys table after dir delete - validateDeletedKeys(deletedKeyNames); + validateDeletedKeys(omMetadataManager, deletedKeyNames); + } + + @Test + public void testValidateAndUpdateCacheSnapshotLastTransactionInfoUpdated() throws Exception { + // Create and Delete keys. 
The keys should be moved to DeletedKeys table + List deletedKeyInfos = createAndDeleteKeys(1, null); + // The keys should be present in the DeletedKeys table before purging + List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); + + String snapshotName = "snap1"; + SnapshotInfo snapshotInfo = createSnapshot(snapshotName); + ReferenceCounted rcOmSnapshot = ozoneManager.getOmSnapshotManager() + .getSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), snapshotInfo.getName()); + // Keys should be present in snapshot + validateDeletedKeysTable(rcOmSnapshot.get().getMetadataManager(), deletedKeyInfos, true); + // keys should have been moved from AOS + validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, false); + + // Create PurgeKeysRequest to purge the deleted keys + assertEquals(snapshotInfo.getLastTransactionInfo(), + TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + bucketKey); + OMRequest omRequest = createPurgeKeysRequest(snapshotInfo.getTableKey(), + null, deletedKeyInfos, omBucketInfo); + OMRequest preExecutedRequest = preExecute(omRequest); + OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = + new OMDirectoriesPurgeRequestWithFSO(preExecutedRequest); + + assertEquals(1000L * deletedKeyNames.size(), omBucketInfo.getUsedBytes()); + OMDirectoriesPurgeResponseWithFSO omClientResponse + = (OMDirectoriesPurgeResponseWithFSO) omKeyPurgeRequest + .validateAndUpdateCache(ozoneManager, 100L); + + SnapshotInfo snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapshotInfo.getTableKey()); + SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotInfo.getTableKey()); + + assertEquals(snapshotInfoOnDisk, snapshotInfo); + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(TransactionInfo.getTermIndex(100L)) + .toByteString()); + assertEquals(snapshotInfo, updatedSnapshotInfo); + omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey); + assertEquals(0L * deletedKeyNames.size(), omBucketInfo.getUsedBytes()); + + performBatchOperationCommit(omClientResponse); + + // The keys should exist in the DeletedKeys table after dir delete + validateDeletedKeys(rcOmSnapshot.get().getMetadataManager(), deletedKeyNames); + snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapshotInfo.getTableKey()); + assertEquals(snapshotInfo, snapshotInfoOnDisk); + rcOmSnapshot.close(); } @Test @@ -214,13 +272,13 @@ public void testValidateAndUpdateCacheQuotaBucketRecreated() // Create and Delete keys. 
The keys should be moved to DeletedKeys table List deletedKeyInfos = createAndDeleteKeys(1, null); // The keys should be present in the DeletedKeys table before purging - List deletedKeyNames = validateDeletedKeysTable(deletedKeyInfos); + List deletedKeyNames = validateDeletedKeysTable(omMetadataManager, deletedKeyInfos, true); // Create PurgeKeysRequest to purge the deleted keys String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( bucketKey); - OMRequest omRequest = createPurgeKeysRequest( + OMRequest omRequest = createPurgeKeysRequest(null, null, deletedKeyInfos, omBucketInfo); OMRequest preExecutedRequest = preExecute(omRequest); OMDirectoriesPurgeRequestWithFSO omKeyPurgeRequest = @@ -258,35 +316,32 @@ public void testValidateAndUpdateCacheQuotaBucketRecreated() performBatchOperationCommit(omClientResponse); // The keys should exist in the DeletedKeys table after dir delete - validateDeletedKeys(deletedKeyNames); + validateDeletedKeys(omMetadataManager, deletedKeyNames); } - private void performBatchOperationCommit( - OMDirectoriesPurgeResponseWithFSO omClientResponse) throws IOException { + private void performBatchOperationCommit(OMDirectoriesPurgeResponseWithFSO omClientResponse) throws IOException { try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - // Do manual commit and see whether addToBatch is successful or not. omMetadataManager.getStore().commitBatchOperation(batchOperation); } } @Nonnull - private List validateDeletedKeysTable( - List deletedKeyInfos) throws IOException { + private List validateDeletedKeysTable(OMMetadataManager omMetadataManager, + List deletedKeyInfos, boolean keyExists) throws IOException { List deletedKeyNames = new ArrayList<>(); for (OmKeyInfo deletedKey : deletedKeyInfos) { String keyName = omMetadataManager.getOzoneKey(deletedKey.getVolumeName(), deletedKey.getBucketName(), deletedKey.getKeyName()); - assertTrue(omMetadataManager.getDeletedTable().isExist(keyName)); + assertEquals(omMetadataManager.getDeletedTable().isExist(keyName), keyExists); deletedKeyNames.add(keyName); } return deletedKeyNames; } - private void validateDeletedKeys( + private void validateDeletedKeys(OMMetadataManager omMetadataManager, List deletedKeyNames) throws IOException { for (String deletedKey : deletedKeyNames) { assertTrue(omMetadataManager.getDeletedTable().isExist( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index 13f0191b29a..1fc0cb6ebad 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -56,6 +56,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND; import static 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; @@ -375,10 +377,19 @@ public void testValidateAndUpdateCacheWithUncommittedBlocks() } - @Test - public void testRejectHsyncIfNotEnabled() throws Exception { + /** + * In these scenarios below, OM should reject key commit with HSync requested from a client: + * 1. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = false + * 2. ozone.hbase.enhancements.allowed = false, ozone.fs.hsync.enabled = true + * 3. ozone.hbase.enhancements.allowed = true, ozone.fs.hsync.enabled = false + */ + @ParameterizedTest + @CsvSource({"false,false", "false,true", "true,false"}) + public void testRejectHsyncIfNotEnabled(boolean hbaseEnhancementsEnabled, boolean fsHsyncEnabled) throws Exception { OzoneConfiguration conf = ozoneManager.getConfiguration(); - conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, false); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, hbaseEnhancementsEnabled); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); + conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, fsHsyncEnabled); BucketLayout bucketLayout = getBucketLayout(); OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, @@ -392,6 +403,9 @@ public void testRejectHsyncIfNotEnabled() throws Exception { // Regular key commit should still work doKeyCommit(false, allocatedKeyLocationList.subList(0, 5)); + // Restore config after this test run + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 4bfdd333296..b9b7c30744e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockProvider; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -501,6 +502,7 @@ public void testOverwritingExistingMetadata( createKeyRequest(false, 0, keyName, initialMetadata); OMKeyCreateRequest initialOmKeyCreateRequest = new OMKeyCreateRequest(initialRequest, getBucketLayout()); + initialOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse initialResponse = initialOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); verifyMetadataInResponse(initialResponse, initialMetadata); @@ -519,6 +521,7 @@ public void testOverwritingExistingMetadata( createKeyRequest(false, 0, keyName, updatedMetadata); OMKeyCreateRequest updatedOmKeyCreateRequest = new OMKeyCreateRequest(updatedRequest, getBucketLayout()); + updatedOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse updatedResponse = updatedOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); @@ -562,6 +565,7 @@ public void testCreationWithoutMetadataFollowedByOverwriteWithMetadata( 
createKeyRequest(false, 0, keyName, overwriteMetadata, emptyMap(), emptyList()); OMKeyCreateRequest overwriteOmKeyCreateRequest = new OMKeyCreateRequest(overwriteRequestWithMetadata, getBucketLayout()); + overwriteOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); // Perform the overwrite operation and capture the response OMClientResponse overwriteResponse = @@ -989,7 +993,7 @@ public void testAtomicRewrite( // Retrieve the committed key info OmKeyInfo existingKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(getOzoneKey()); List existingAcls = existingKeyInfo.getAcls(); - assertEquals(acls, existingAcls); + assertThat(existingAcls.containsAll(acls)); // Create a request with a generation which doesn't match the current key omRequest = createKeyRequest(false, 0, 100, @@ -1039,9 +1043,9 @@ private void verifyKeyInheritAcls(List keyAcls, .findAny().orElse(null); // Should inherit parent DEFAULT Acls - assertEquals(parentDefaultAcl.stream() + assertTrue(keyAcls.containsAll(parentDefaultAcl.stream() .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()), keyAcls, + .collect(Collectors.toList())), "Failed to inherit parent DEFAULT acls!,"); // Should not inherit parent ACCESS Acls @@ -1054,7 +1058,7 @@ protected void addToKeyTable(String keyName) throws Exception { } - private void checkNotAValidPath(String keyName) { + private void checkNotAValidPath(String keyName) throws IOException { OMRequest omRequest = createKeyRequest(false, 0, keyName); OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest); OMException ex = @@ -1137,13 +1141,16 @@ protected String getOzoneKey() throws IOException { return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); } - protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) { - return new OMKeyCreateRequest(omRequest, getBucketLayout()); + protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequest(omRequest, getBucketLayout()); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected OMKeyCreateRequest getOMKeyCreateRequest( - OMRequest omRequest, BucketLayout layout) { - return new OMKeyCreateRequest(omRequest, layout); + OMRequest omRequest, BucketLayout layout) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequest(omRequest, layout); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index a5181b25a0e..8f8cc025436 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -210,15 +211,19 @@ protected String getOzoneKey() throws IOException { } @Override - protected 
OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) { - return new OMKeyCreateRequestWithFSO(omRequest, + protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected OMKeyCreateRequest getOMKeyCreateRequest( - OMRequest omRequest, BucketLayout layout) { - return new OMKeyCreateRequestWithFSO(omRequest, layout); + OMRequest omRequest, BucketLayout layout) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequestWithFSO(omRequest, layout); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index a912f549b3c..c323fecd501 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -23,12 +23,10 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; -import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; -import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.junit.jupiter.api.Test; @@ -42,12 +40,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.hdds.utils.db.BatchOperation; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.when; /** * Tests {@link OMKeyPurgeRequest} and {@link OMKeyPurgeResponse}. @@ -115,35 +111,6 @@ private OMRequest createPurgeKeysRequest(List deletedKeys, .build(); } - /** - * Create snapshot and checkpoint directory. - */ - private SnapshotInfo createSnapshot(String snapshotName) throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - BatchOperation batchOperation = omMetadataManager.getStore() - .initBatchOperation(); - OMRequest omRequest = OMRequestTestUtils - .createSnapshotRequest(volumeName, bucketName, snapshotName); - // Pre-Execute OMSnapshotCreateRequest. - OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); - - // validateAndUpdateCache OMSnapshotCreateResponse. - OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); - // Add to batch and commit to DB. 
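For context on the snapshot-aware purge assertions nearby: after validateAndUpdateCache runs at a given transaction index, the cached SnapshotInfo is expected to carry that index in lastTransactionInfo, while the copy read with getSkipCache stays unchanged until the batch operation is committed. A minimal sketch of that expectation follows; the helper class and method names are hypothetical, and only the TransactionInfo and SnapshotInfo calls visible in the tests are used.

```java
import java.io.IOException;

import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;

import static org.junit.jupiter.api.Assertions.assertEquals;

/** Illustrative sketch only; not part of the patch. */
final class SnapshotTransactionAssertionSketch {

  private SnapshotTransactionAssertionSketch() {
  }

  static void assertLastTransactionRecorded(OMMetadataManager omMetadataManager,
      SnapshotInfo snapshotBeforePurge, long txIndex) throws IOException {
    // The cached copy reflects the purge transaction immediately after validateAndUpdateCache...
    SnapshotInfo cached =
        omMetadataManager.getSnapshotInfoTable().get(snapshotBeforePurge.getTableKey());
    assertEquals(
        TransactionInfo.valueOf(TransactionInfo.getTermIndex(txIndex)).toByteString(),
        cached.getLastTransactionInfo());

    // ...while the on-disk copy is only updated once the batch is committed to the DB.
    SnapshotInfo onDisk =
        omMetadataManager.getSnapshotInfoTable().getSkipCache(snapshotBeforePurge.getTableKey());
    assertEquals(snapshotBeforePurge.getLastTransactionInfo(), onDisk.getLastTransactionInfo());
  }
}
```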
- omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - batchOperation.close(); - - String key = SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName); - SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); - assertNotNull(snapshotInfo); - return snapshotInfo; - } - private OMRequest preExecute(OMRequest originalOmRequest) throws IOException { OMKeyPurgeRequest omKeyPurgeRequest = new OMKeyPurgeRequest(originalOmRequest); @@ -205,22 +172,15 @@ public void testKeyPurgeInSnapshot() throws Exception { List deletedKeyNames = createAndDeleteKeys(1, null); SnapshotInfo snapInfo = createSnapshot("snap1"); + assertEquals(snapInfo.getLastTransactionInfo(), + TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); // The keys should be not present in the active Db's deletedTable for (String deletedKey : deletedKeyNames) { assertFalse(omMetadataManager.getDeletedTable().isExist(deletedKey)); } - SnapshotInfo fromSnapshotInfo = new SnapshotInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setName("snap1") - .build(); - - ReferenceCounted rcOmSnapshot = - ozoneManager.getOmSnapshotManager().getSnapshot( - fromSnapshotInfo.getVolumeName(), - fromSnapshotInfo.getBucketName(), - fromSnapshotInfo.getName()); + ReferenceCounted rcOmSnapshot = ozoneManager.getOmSnapshotManager() + .getSnapshot(snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName()); OmSnapshot omSnapshot = rcOmSnapshot.get(); // The keys should be present in the snapshot's deletedTable @@ -239,6 +199,12 @@ public void testKeyPurgeInSnapshot() throws Exception { omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L); + SnapshotInfo snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapInfo.getTableKey()); + SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapInfo.getTableKey()); + assertEquals(snapshotInfoOnDisk, snapInfo); + snapInfo.setLastTransactionInfo(TransactionInfo.valueOf(TransactionInfo.getTermIndex(100L)) + .toByteString()); + assertEquals(snapInfo, updatedSnapshotInfo); OMResponse omResponse = OMResponse.newBuilder() .setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance()) .setCmdType(Type.PurgeKeys) @@ -248,14 +214,14 @@ public void testKeyPurgeInSnapshot() throws Exception { try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse( - omResponse, deletedKeyNames, fromSnapshotInfo, null); + OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse(omResponse, deletedKeyNames, snapInfo, null); omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation); // Do manual commit and see whether addToBatch is successful or not. 
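The snapshot-backed checks above read the snapshot through a reference-counted handle obtained from the OmSnapshotManager and release it once the assertions are done. Below is a minimal sketch of that acquire, use, release pattern; the helper is hypothetical, and the ReferenceCounted<OmSnapshot> type parameter is an assumption, since generic arguments are not visible in this rendering of the diff.

```java
import java.io.IOException;

import org.apache.hadoop.ozone.om.OmSnapshot;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;

/** Illustrative sketch only; mirrors the rcOmSnapshot usage in the tests above. */
final class SnapshotHandleSketch {

  private SnapshotHandleSketch() {
  }

  static boolean deletedKeyExistsInSnapshot(OzoneManager ozoneManager, String volume,
      String bucket, String snapshotName, String deletedKey) throws IOException {
    ReferenceCounted<OmSnapshot> rcOmSnapshot =
        ozoneManager.getOmSnapshotManager().getSnapshot(volume, bucket, snapshotName);
    try {
      // Work against the snapshot's own metadata manager, not the active OM DB.
      return rcOmSnapshot.get().getMetadataManager()
          .getDeletedTable().isExist(deletedKey);
    } finally {
      // Release the reference so the snapshot can be evicted from the cache.
      rcOmSnapshot.close();
    }
  }
}
```

The explicit close() mirrors the tests, which call rcOmSnapshot.close() directly rather than relying on try-with-resources.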
omMetadataManager.getStore().commitBatchOperation(batchOperation); } - + snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapInfo.getTableKey()); + assertEquals(snapshotInfoOnDisk, snapInfo); // The keys should not exist in the DeletedKeys table for (String deletedKey : deletedKeyNames) { assertFalse(omSnapshot.getMetadataManager() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index c1b0e45e6d6..c18e1ee7c3f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMPerformanceMetrics; @@ -43,9 +44,15 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; import org.apache.hadoop.security.UserGroupInformation; @@ -53,6 +60,7 @@ import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.apache.hadoop.hdds.client.ContainerBlockID; @@ -78,6 +86,10 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.setupReplicationConfigValidation; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyLong; @@ -132,6 +144,7 @@ public void setup() throws Exception { folder.toAbsolutePath().toString()); ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, folder.toAbsolutePath().toString()); + ozoneConfiguration.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); ozoneConfiguration.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, 
ozoneManager); @@ -236,7 +249,7 @@ public void setup() throws Exception { .thenReturn(bucket); when(ozoneManager.resolveBucketLink(any(Pair.class))) .thenReturn(bucket); - OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); + OmSnapshotManager omSnapshotManager = Mockito.spy(new OmSnapshotManager(ozoneManager)); when(ozoneManager.getOmSnapshotManager()) .thenReturn(omSnapshotManager); @@ -284,4 +297,51 @@ public void stop() { omMetrics.unRegister(); framework().clearInlineMocks(); } + + /** + * Create snapshot and checkpoint directory. + */ + protected SnapshotInfo createSnapshot(String snapshotName) throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + BatchOperation batchOperation = omMetadataManager.getStore() + .initBatchOperation(); + OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils + .createSnapshotRequest(volumeName, bucketName, snapshotName); + // Pre-Execute OMSnapshotCreateRequest. + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); + + // validateAndUpdateCache OMSnapshotCreateResponse. + OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); + // Add to batch and commit to DB. + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + batchOperation.close(); + + String key = SnapshotInfo.getTableKey(volumeName, + bucketName, snapshotName); + SnapshotInfo snapshotInfo = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNotNull(snapshotInfo); + return snapshotInfo; + } + + @Test + public void testValidateKeyArgs() { + OMKeyRequest.ValidateKeyArgs validateKeyArgs1 = new OMKeyRequest.ValidateKeyArgs.Builder() + .setKeyName("tmpKey").setSnapshotReservedWord("tmpSnapshotReservedWord").build(); + assertEquals("tmpSnapshotReservedWord", validateKeyArgs1.getSnapshotReservedWord()); + assertEquals("tmpKey", validateKeyArgs1.getKeyName()); + assertTrue(validateKeyArgs1.isValidateKeyName()); + assertTrue(validateKeyArgs1.isValidateSnapshotReserved()); + + OMKeyRequest.ValidateKeyArgs validateKeyArgs2 = new OMKeyRequest.ValidateKeyArgs.Builder() + .setKeyName("tmpKey2").build(); + assertNull(validateKeyArgs2.getSnapshotReservedWord()); + assertEquals("tmpKey2", validateKeyArgs2.getKeyName()); + assertTrue(validateKeyArgs2.isValidateKeyName()); + assertFalse(validateKeyArgs2.isValidateSnapshotReserved()); + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java index f02e1ee2367..0220afbc60c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java @@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import 
org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -447,6 +448,7 @@ private List createMPUsWithFSO(String volume, String bucket, S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + s3InitiateMultipartUploadRequest.setUGI(UserGroupInformation.getLoginUser()); OMClientResponse omClientResponse = s3InitiateMultipartUploadRequest .validateAndUpdateCache(ozoneManager, trxnLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index 30b76801d9e..f9006b852e4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -250,7 +250,7 @@ private void verifyKeyInheritAcls(List keyAcls, // Should inherit parent DEFAULT Acls // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] - assertEquals(parentDefaultAcl, keyAcls, + assertTrue(keyAcls.containsAll(parentDefaultAcl), "Failed to inherit parent DEFAULT acls!"); // Should not inherit parent ACCESS Acls diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java index 1d4eb5310e0..d92992edf58 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -166,9 +167,11 @@ private long verifyDirectoriesInDB(List dirs, final long volumeId, @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getLoginUser()); + return request; } @Test @@ -256,7 +259,7 @@ private void verifyKeyInheritAcls(List dirs, OmKeyInfo fileInfo, List omDirAcls = omDirInfo.getAcls(); System.out.println(" subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index bd93fe176e9..ff920667539 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; @@ -114,6 +115,7 @@ public void setup() throws Exception { when(lvm.getMetadataLayoutVersion()).thenReturn(0); when(ozoneManager.getVersionManager()).thenReturn(lvm); when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); } @@ -353,21 +355,27 @@ protected OMRequest doPreExecuteInitiateMPUWithFSO( } protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq( - OMRequest omRequest) { - return new S3MultipartUploadCompleteRequest(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCompleteRequest request = new S3MultipartUploadCompleteRequest(omRequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - return new S3MultipartUploadCommitPartRequest(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new S3MultipartUploadCommitPartRequest(omRequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequest(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequest(initiateMPURequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java index 3c710988a56..7e92cf042e7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java @@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.UUID; @@ -45,9 +46,11 @@ protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq( @Override protected S3InitiateMultipartUploadRequest 
getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index 014b4e021cb..fa901af6457 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -71,7 +71,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { bucketName, keyName); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - getS3InitiateMultipartUploadReq(initiateMPURequest); + getS3InitiateMultipartUploadReq(initiateMPURequest); OMClientResponse omClientResponse = s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, 1L); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java index 24480c249cc..eb2c82af172 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.ArrayList; @@ -49,16 +50,20 @@ public class TestS3MultipartUploadCommitPartRequestWithFSO @Override protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - return new S3MultipartUploadCommitPartRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new S3MultipartUploadCommitPartRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index 1762f38b44b..dc58254d7d3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.ArrayList; @@ -113,23 +114,29 @@ protected String getOzoneDBKey(String volumeName, String bucketName, @Override protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq( - OMRequest omRequest) { - return new S3MultipartUploadCompleteRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCompleteRequest request = new S3MultipartUploadCompleteRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - return new S3MultipartUploadCommitPartRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new S3MultipartUploadCommitPartRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java new file mode 100644 index 00000000000..9c307d85671 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Test delete object tagging request. + */ +public class TestS3DeleteObjectTaggingRequest extends TestOMKeyRequest { + + @Test + public void testPreExecute() throws Exception { + doPreExecute(volumeName, bucketName, keyName); + } + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + Map tags = getTags(5); + String ozoneKey = addKeyToTable(tags); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertEquals(tags.size(), omKeyInfo.getTags().size()); + + OMRequest originalRequest = createDeleteObjectTaggingRequest(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getDeleteObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getDeleteObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.DeleteObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(updatedKeyInfo); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertEquals(0, updatedKeyInfo.getTags().size()); + } + + @Test + public void testValidateAndUpdateCacheVolumeNotFound() throws Exception { + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + 
S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheBucketNotFound() throws Exception { + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheKeyNotFound() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + protected OMRequest doPreExecute(String volumeName, String bucketName, + String keyName) throws Exception { + OMRequest originalRequest = createDeleteObjectTaggingRequest( + volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + verifyRequest(modifiedRequest, originalRequest); + + return modifiedRequest; + } + + public OMRequest createDeleteObjectTaggingRequest(String volumeName, + String bucketName, + String keyName) { + KeyArgs.Builder keyArgs = KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName); + + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = + DeleteObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + return OMRequest.newBuilder() + .setDeleteObjectTaggingRequest(deleteObjectTaggingRequest) + .setCmdType(Type.DeleteObjectTagging) + .setClientId(UUID.randomUUID().toString()) + .build(); + } + + private void verifyRequest(OMRequest modifiedRequest, OMRequest originalRequest) { + + KeyArgs original = originalRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + + KeyArgs updated = modifiedRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + + assertEquals(original.getVolumeName(), updated.getVolumeName()); + assertEquals(original.getBucketName(), updated.getBucketName()); + assertEquals(original.getKeyName(), updated.getKeyName()); + assertEquals(original.getTagsList(), updated.getTagsList()); + // Modification time will not be set for object tagging request + assertFalse(updated.hasModificationTime()); + } + + protected String addKeyToTable(Map tags) throws Exception { + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addKeyToTable(false, false, omKeyInfo, + clientID, 1L, omMetadataManager); + return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + } + + protected 
S3DeleteObjectTaggingRequest getDeleteObjectTaggingRequest(OMRequest originalRequest) { + return new S3DeleteObjectTaggingRequest(originalRequest, getBucketLayout()); + } + + protected Map getTags(int size) { + Map tags = new HashMap<>(); + for (int i = 0; i < size; i++) { + tags.put("tag-key-" + UUID.randomUUID(), "tag-value-" + UUID.randomUUID()); + } + return tags; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..ca3010a9b29 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; + +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + +/** + * Test delete object tagging request for FSO bucket. + */ +public class TestS3DeleteObjectTaggingRequestWithFSO extends TestS3DeleteObjectTaggingRequest { + + private static final String PARENT_DIR = "c/d/e"; + private static final String FILE_NAME = "file1"; + private static final String FILE_KEY = PARENT_DIR + "/" + FILE_NAME; + + @Override + protected String addKeyToTable(Map tags) throws Exception { + keyName = FILE_KEY; // updated key name + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, PARENT_DIR, omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(1L) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); + final long volumeId = omMetadataManager.getVolumeId( + omKeyInfo.getVolumeName()); + final long bucketId = omMetadataManager.getBucketId( + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + return omMetadataManager.getOzonePathKey( + volumeId, bucketId, omKeyInfo.getParentObjectID(), + omKeyInfo.getFileName()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected S3DeleteObjectTaggingRequest getDeleteObjectTaggingRequest(OMRequest originalRequest) { + return new S3DeleteObjectTaggingRequestWithFSO(originalRequest, getBucketLayout()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java new file mode 100644 index 00000000000..c70c2587332 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Test put object tagging request. + */ +public class TestS3PutObjectTaggingRequest extends TestOMKeyRequest { + + @Test + public void testPreExecute() throws Exception { + Map tags = new HashMap<>(); + getTags(2); + doPreExecute(volumeName, bucketName, keyName, tags); + } + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + String ozoneKey = addKeyToTable(); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + + assertNotNull(omKeyInfo); + assertTrue(omKeyInfo.getTags().isEmpty()); + + Map tags = getTags(5); + + OMRequest originalRequest = createPutObjectTaggingRequest(volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getPutObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getPutObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.PutObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(updatedKeyInfo); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertEquals(tags.size(), updatedKeyInfo.getTags().size()); + for (Map.Entry tag: tags.entrySet()) { + String 
value = updatedKeyInfo.getTags().get(tag.getKey()); + assertNotNull(value); + assertEquals(tag.getValue(), value); + } + } + + @Test + public void testValidateAndUpdateCacheVolumeNotFound() throws Exception { + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheBucketNotFound() throws Exception { + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheKeyNotFound() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheEmptyTagSet() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + String ozoneKey = addKeyToTable(); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertTrue(omKeyInfo.getTags().isEmpty()); + + Map tags = getTags(0); + + OMRequest originalRequest = createPutObjectTaggingRequest(volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getPutObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 1L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getPutObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.PutObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertTrue(omKeyInfo.getTags().isEmpty()); + assertEquals(tags.size(), updatedKeyInfo.getTags().size()); + } + + + protected OMRequest doPreExecute(String volumeName, + String bucketName, + String keyName, + Map tags) throws Exception { + OMRequest originalRequest = createPutObjectTaggingRequest( + volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = 
getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + verifyRequest(modifiedRequest, originalRequest); + + return modifiedRequest; + } + + private OMRequest createPutObjectTaggingRequest(String volumeName, + String bucketName, + String keyName, + Map tags) { + KeyArgs.Builder keyArgs = KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName); + + if (tags != null && !tags.isEmpty()) { + keyArgs.addAllTags(KeyValueUtil.toProtobuf(tags)); + } + + PutObjectTaggingRequest putObjectTaggingRequest = + PutObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + return OMRequest.newBuilder() + .setPutObjectTaggingRequest(putObjectTaggingRequest) + .setCmdType(Type.PutObjectTagging) + .setClientId(UUID.randomUUID().toString()) + .build(); + } + + private void verifyRequest(OMRequest modifiedRequest, OMRequest originalRequest) { + + KeyArgs original = originalRequest.getPutObjectTaggingRequest().getKeyArgs(); + + KeyArgs updated = modifiedRequest.getPutObjectTaggingRequest().getKeyArgs(); + + assertEquals(original.getVolumeName(), updated.getVolumeName()); + assertEquals(original.getBucketName(), updated.getBucketName()); + assertEquals(original.getKeyName(), updated.getKeyName()); + assertEquals(original.getTagsList(), updated.getTagsList()); + // Modification time will not be set for object tagging request + assertFalse(updated.hasModificationTime()); + } + + protected String addKeyToTable() throws Exception { + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), 1L, + omMetadataManager); + + return omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + } + + protected S3PutObjectTaggingRequest getPutObjectTaggingRequest(OMRequest originalRequest) { + return new S3PutObjectTaggingRequest(originalRequest, getBucketLayout()); + } + + protected Map getTags(int size) { + Map tags = new HashMap<>(); + for (int i = 0; i < size; i++) { + tags.put("tag-key-" + UUID.randomUUID(), "tag-value-" + UUID.randomUUID()); + } + return tags; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..38ea5facad2 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.junit.jupiter.api.Test; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test put object tagging request for FSO bucket. + */ +public class TestS3PutObjectTaggingRequestWithFSO extends TestS3PutObjectTaggingRequest { + + private static final String PARENT_DIR = "c/d/e"; + private static final String FILE_NAME = "file1"; + private static final String FILE_KEY = PARENT_DIR + "/" + FILE_NAME; + + @Test + public void testValidateAndUpdateCachePutObjectTaggingToDir() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + addKeyToTable(); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, PARENT_DIR, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.NOT_SUPPORTED_OPERATION, + omClientResponse.getOMResponse().getStatus()); + } + + @Override + protected String addKeyToTable() throws Exception { + keyName = FILE_KEY; // updated key name + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, PARENT_DIR, omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(1L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); + final long volumeId = omMetadataManager.getVolumeId( + omKeyInfo.getVolumeName()); + final long bucketId = omMetadataManager.getBucketId( + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + return omMetadataManager.getOzonePathKey( + volumeId, bucketId, omKeyInfo.getParentObjectID(), + omKeyInfo.getFileName()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected S3PutObjectTaggingRequest getPutObjectTaggingRequest(OMRequest originalRequest) { + return new S3PutObjectTaggingRequestWithFSO(originalRequest, getBucketLayout()); + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index 3997f39d7bd..b7b7ff0a464 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -18,37 +18,31 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseWithFSO; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.io.IOException; import java.util.UUID; @@ -64,69 +58,19 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * Tests OMSnapshotCreateRequest class, which handles CreateSnapshot request. 
*/ -public class TestOMSnapshotCreateRequest { - @TempDir - private File anotherTempDir; - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - private BatchOperation batchOperation; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotCreateRequest extends TestSnapshotRequestAndResponse { private String snapshotName1; private String snapshotName2; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - anotherTempDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName1 = UUID.randomUUID().toString(); snapshotName2 = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - } - - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - if (batchOperation != null) { - batchOperation.close(); - } } @ValueSource(strings = { @@ -139,12 +83,35 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String snapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); doPreExecute(omRequest); } + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. 
+ "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecuteWithLinkedBucket(String snapshotName) throws Exception { + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + String resolvedBucketName = getBucketName() + "1"; + String resolvedVolumeName = getVolumeName() + "1"; + when(getOzoneManager().resolveBucketLink(any(Pair.class), any(OMClientRequest.class))) + .thenAnswer(i -> new ResolvedBucket(i.getArgument(0), Pair.of(resolvedVolumeName, resolvedBucketName), + "owner", BucketLayout.FILE_SYSTEM_OPTIMIZED)); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); + OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); + assertEquals(resolvedVolumeName, omSnapshotCreateRequest.getOmRequest().getCreateSnapshotRequest().getVolumeName()); + assertEquals(resolvedBucketName, omSnapshotCreateRequest.getOmRequest().getCreateSnapshotRequest().getBucketName()); + } + @ValueSource(strings = { // ? is not allowed in snapshot name. "a?b", @@ -157,9 +124,9 @@ public void testPreExecute(String snapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String snapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage() @@ -169,8 +136,8 @@ public void testPreExecuteFailure(String snapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName1); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -180,29 +147,29 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName1); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); - String key = getTableKey(volumeName, bucketName, snapshotName1); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(), getBucketName()); // Add a 1000-byte key to the bucket OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); addKeyToTable(key1); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get( bucketKey); long bucketDataSize = key1.getDataSize(); long bucketUsedBytes = omBucketInfo.getUsedBytes(); assertEquals(key1.getReplicatedSize(), bucketUsedBytes); // Value in cache should be null as of now. 
- assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Run validateAndUpdateCache. OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); assertNotNull(omClientResponse.getOMResponse()); @@ -226,20 +193,21 @@ public void testValidateAndUpdateCache() throws Exception { // Get value from cache SnapshotInfo snapshotInfoInCache = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfoInCache); assertEquals(snapshotInfoFromProto, snapshotInfoInCache); - - assertEquals(0, omMetrics.getNumSnapshotCreateFails()); - assertEquals(1, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotCreates()); + assertEquals(snapshotInfoInCache.getLastTransactionInfo(), + TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); + assertEquals(0, getOmMetrics().getNumSnapshotCreateFails()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotCreates()); } @Test public void testEntryRenamedKeyTable() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); Table snapshotRenamedTable = - omMetadataManager.getSnapshotRenamedTable(); + getOmMetadataManager().getSnapshotRenamedTable(); renameKey("key1", "key2", 0); renameDir("dir1", "dir2", 5); @@ -249,17 +217,17 @@ public void testEntryRenamedKeyTable() throws Exception { // Create snapshot createSnapshot(snapshotName1); - String snapKey = getTableKey(volumeName, - bucketName, snapshotName1); + String snapKey = getTableKey(getVolumeName(), + getBucketName(), snapshotName1); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(snapKey); + getOmMetadataManager().getSnapshotInfoTable().get(snapKey); assertNotNull(snapshotInfo); renameKey("key3", "key4", 10); renameDir("dir3", "dir4", 15); // Rename table should have two entries as rename is within snapshot scope. 
- assertEquals(2, omMetadataManager + assertEquals(2, getOmMetadataManager() .countRowsInTable(snapshotRenamedTable)); // Create snapshot to clear snapshotRenamedTable @@ -269,33 +237,33 @@ public void testEntryRenamedKeyTable() throws Exception { @Test public void testEntryExists() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String key = getTableKey(volumeName, bucketName, snapshotName1); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); OMRequest omRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName1); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Now try to create again to verify error - omRequest = createSnapshotRequest(volumeName, bucketName, snapshotName1); + omRequest = createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); omSnapshotCreateRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 2); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getCreateSnapshotResponse()); assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS, omResponse.getStatus()); - assertEquals(1, omMetrics.getNumSnapshotCreateFails()); - assertEquals(1, omMetrics.getNumSnapshotActive()); - assertEquals(2, omMetrics.getNumSnapshotCreates()); + assertEquals(1, getOmMetrics().getNumSnapshotCreateFails()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); + assertEquals(2, getOmMetrics().getNumSnapshotCreates()); } private void renameKey(String fromKey, String toKey, long offset) @@ -314,15 +282,15 @@ private void renameKey(String fromKey, String toKey, long offset) new OMKeyRenameResponse(omResponse, fromKeyInfo.getKeyName(), toKeyInfo.getKeyName(), toKeyInfo); - omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omKeyRenameResponse.addToDBBatch(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } private void renameDir(String fromKey, String toKey, long offset) throws Exception { String fromKeyParentName = UUID.randomUUID().toString(); - OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) + OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(getVolumeName(), + getBucketName(), fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) .setObjectID(100L) .build(); @@ -340,32 +308,32 @@ private void renameDir(String fromKey, String toKey, long offset) new OMKeyRenameResponseWithFSO(omResponse, getDBKeyName(fromKeyInfo), getDBKeyName(toKeyInfo), fromKeyParent, null, toKeyInfo, null, true, BucketLayout.FILE_SYSTEM_OPTIMIZED); - omKeyRenameResponse.addToDBBatch(omMetadataManager, 
batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omKeyRenameResponse.addToDBBatch(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } protected String getDBKeyName(OmKeyInfo keyInfo) throws IOException { - return omMetadataManager.getOzonePathKey( - omMetadataManager.getVolumeId(volumeName), - omMetadataManager.getBucketId(volumeName, bucketName), + return getOmMetadataManager().getOzonePathKey( + getOmMetadataManager().getVolumeId(getVolumeName()), + getOmMetadataManager().getBucketId(getVolumeName(), getBucketName()), keyInfo.getParentObjectID(), keyInfo.getKeyName()); } private void createSnapshot(String snapName) throws Exception { OMRequest omRequest = createSnapshotRequest( - volumeName, bucketName, snapName); + getVolumeName(), getBucketName(), snapName); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); //create entry OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); - omClientResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); + omClientResponse.checkAndUpdateDB(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } private OMSnapshotCreateRequest doPreExecute( OMRequest originalRequest) throws Exception { - return doPreExecute(originalRequest, ozoneManager); + return doPreExecute(originalRequest, getOzoneManager()); } /** @@ -382,15 +350,15 @@ public static OMSnapshotCreateRequest doPreExecute( } private OmKeyInfo addKey(String keyName, long objectId) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, - omMetadataManager); - return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + getOmMetadataManager()); + return getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index 5a8bb5d7c0d..9e19e594843 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -19,33 +19,25 @@ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import 
org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.util.UUID; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; @@ -61,10 +53,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** @@ -72,60 +60,15 @@ * Mostly mirrors TestOMSnapshotCreateRequest. * testEntryNotExist() and testEntryExists() are unique. 
*/ -public class TestOMSnapshotDeleteRequest { - @TempDir - private File folder; +public class TestOMSnapshotDeleteRequest extends TestSnapshotRequestAndResponse { - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - - private String volumeName; - private String bucketName; private String snapshotName; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - - OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB( - volumeName, bucketName, omMetadataManager); - } - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - } - - @ValueSource(strings = { // '-' is allowed. "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", @@ -136,12 +79,35 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String deleteSnapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, deleteSnapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), deleteSnapshotName); doPreExecute(omRequest); } + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. 
+ "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecuteWithLinkedBuckets(String deleteSnapshotName) throws Exception { + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + String resolvedBucketName = getBucketName() + "1"; + String resolvedVolumeName = getVolumeName() + "1"; + when(getOzoneManager().resolveBucketLink(any(Pair.class), any(OMClientRequest.class))) + .thenAnswer(i -> new ResolvedBucket(i.getArgument(0), Pair.of(resolvedVolumeName, resolvedBucketName), + "owner", BucketLayout.FILE_SYSTEM_OPTIMIZED)); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), deleteSnapshotName); + OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); + assertEquals(resolvedVolumeName, omSnapshotDeleteRequest.getOmRequest().getDeleteSnapshotRequest().getVolumeName()); + assertEquals(resolvedBucketName, omSnapshotDeleteRequest.getOmRequest().getDeleteSnapshotRequest().getBucketName()); + } + @ValueSource(strings = { // ? is not allowed in snapshot name. "a?b", @@ -154,9 +120,9 @@ public void testPreExecute(String deleteSnapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String deleteSnapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, deleteSnapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), deleteSnapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage() @@ -166,8 +132,8 @@ public void testPreExecuteFailure(String deleteSnapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. - OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, snapshotName); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -177,27 +143,27 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); OMRequest omRequest = - deleteSnapshotRequest(volumeName, bucketName, snapshotName); + deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); - String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); // As we have not still called validateAndUpdateCache, get() should // return null. 
- assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // add key to cache - SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(), getBucketName(), snapshotName, null, Time.now()); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry( + getOmMetadataManager().getSnapshotInfoTable().addCacheEntry( new CacheKey<>(key), CacheValue.get(1L, snapshotInfo)); // Trigger validateAndUpdateCache OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 2L); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); @@ -207,14 +173,14 @@ public void testValidateAndUpdateCache() throws Exception { assertEquals(OK, omResponse.getStatus()); // check cache - snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); - assertEquals(0, omMetrics.getNumSnapshotCreates()); + assertEquals(0, getOmMetrics().getNumSnapshotCreates()); // Expected -1 because no snapshot was created before. - assertEquals(-1, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotDeleted()); - assertEquals(0, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(-1, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleted()); + assertEquals(0, getOmMetrics().getNumSnapshotDeleteFails()); } /** @@ -222,25 +188,25 @@ public void testValidateAndUpdateCache() throws Exception { */ @Test public void testEntryNotExist() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); OMRequest omRequest = deleteSnapshotRequest( - volumeName, bucketName, snapshotName); + getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); - String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); // Entry does not exist - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Trigger delete snapshot validateAndUpdateCache OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 1L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 1L); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getDeleteSnapshotResponse()); assertEquals(Status.FILE_NOT_FOUND, omResponse.getStatus()); - assertEquals(0, omMetrics.getNumSnapshotActive()); - assertEquals(0, omMetrics.getNumSnapshotDeleted()); - assertEquals(1, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); + assertEquals(0, getOmMetrics().getNumSnapshotDeleted()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleteFails()); } /** @@ -249,50 +215,50 @@ public void testEntryNotExist() throws Exception { */ @Test public void testEntryExist() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - String key = 
SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); OMRequest omRequest1 = - createSnapshotRequest(volumeName, bucketName, snapshotName); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest1, ozoneManager); + TestOMSnapshotCreateRequest.doPreExecute(omRequest1, getOzoneManager()); - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Create snapshot entry - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1L); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - assertEquals(1, omMetrics.getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); OMRequest omRequest2 = - deleteSnapshotRequest(volumeName, bucketName, snapshotName); + deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest2); // Delete snapshot entry OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 2L); // Response should be successful OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); assertNotNull(omResponse.getDeleteSnapshotResponse()); assertEquals(OK, omResponse.getStatus()); - snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); // The snapshot entry should still exist in the table, // but marked as DELETED. assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); assertThat(snapshotInfo.getDeletionTime()).isGreaterThan(0L); - assertEquals(0, omMetrics.getNumSnapshotActive()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); // Now delete snapshot entry again, expect error. - omRequest2 = deleteSnapshotRequest(volumeName, bucketName, snapshotName); + omRequest2 = deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); omSnapshotDeleteRequest = doPreExecute(omRequest2); omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 3L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 3L); omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); @@ -300,11 +266,11 @@ public void testEntryExist() throws Exception { assertEquals(Status.FILE_NOT_FOUND, omResponse.getStatus()); // Snapshot entry should still be there. 
- snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); - assertEquals(0, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleteFails()); } private OMSnapshotDeleteRequest doPreExecute( @@ -313,7 +279,7 @@ private OMSnapshotDeleteRequest doPreExecute( new OMSnapshotDeleteRequest(originalRequest); OMRequest modifiedRequest = - omSnapshotDeleteRequest.preExecute(ozoneManager); + omSnapshotDeleteRequest.preExecute(getOzoneManager()); return new OMSnapshotDeleteRequest(modifiedRequest); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java new file mode 100644 index 00000000000..247f322dfcf --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.deleteSnapshotRequest; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.moveSnapshotTableKeyRequest; + +/** + * Class to test OmSnapshotMoveTableKeyRequest. 
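+ * Covers preExecute validation of deleted keys, deleted directories and renamed keys, and the validateAndUpdateCache path.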
+ */ +public class TestOMSnapshotMoveTableKeysRequest extends TestSnapshotRequestAndResponse { + + private String snapshotName1; + private String snapshotName2; + private SnapshotInfo snapshotInfo1; + private SnapshotInfo snapshotInfo2; + + @BeforeEach + public void setup() throws Exception { + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + } + + public TestOMSnapshotMoveTableKeysRequest() { + super(true); + } + + private void createSnapshots(boolean createSecondSnapshot) throws Exception { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName1); + snapshotInfo1 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName1); + if (createSecondSnapshot) { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName2); + snapshotInfo2 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName2); + } + } + + private SnapshotInfo deleteSnapshot(SnapshotInfo snapshotInfo, long transactionIndex) throws Exception { + OzoneManagerProtocolProtos.OMRequest omRequest = deleteSnapshotRequest(snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), snapshotInfo.getName()); + OMSnapshotDeleteRequest omSnapshotDeleteRequest = new OMSnapshotDeleteRequest(omRequest); + omSnapshotDeleteRequest.preExecute(getOzoneManager()); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), transactionIndex); + return SnapshotUtils.getSnapshotInfo(getOzoneManager(), snapshotInfo.getTableKey()); + } + + @Test + public void testValidateAndUpdateCacheWithNextSnapshotInactive() throws Exception { + createSnapshots(true); + snapshotInfo2 = deleteSnapshot(snapshotInfo2, 0); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest( + omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + OMClientResponse omClientResponse = omSnapshotMoveTableKeysRequest.validateAndUpdateCache(getOzoneManager(), 1); + Assertions.assertFalse(omClientResponse.getOMResponse().getSuccess()); + Assertions.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_SNAPSHOT_ERROR, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testPreExecuteWithInvalidDeletedKeyPrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(invalidVolumeName, invalidBucketName, 0, 10, 10, 0)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + 
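+ // Same check as the deleted-key case above, but for deleted directory entries: prefixes from an unrelated volume/bucket are rejected with INVALID_KEY_NAME.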
@Test + public void testPreExecuteWithInvalidDeletedDirPrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List<Pair<String, List<OmKeyInfo>>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(invalidVolumeName, invalidBucketName, 0, 10, 1)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), deletedDirs, Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidNumberKeys() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List<Pair<String, List<OmKeyInfo>>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(invalidVolumeName, invalidBucketName, 0, 10, 10)) + .flatMap(List::stream).collect(Collectors.toList()); + List<Pair<String, List<OmKeyInfo>>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(invalidVolumeName, invalidBucketName, 0, 10, 0, 0)) + .flatMap(List::stream).collect(Collectors.toList()); + List<Pair<String, String>> renameKeys = getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1); + renameKeys.add(Pair.of(getOmMetadataManager().getRenameKey(getVolumeName(), getBucketName(), 11), null)); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, deletedDirs, renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + omRequest = omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager()); + for (OzoneManagerProtocolProtos.SnapshotMoveKeyInfos deletedDir : + omRequest.getSnapshotMoveTableKeysRequest().getDeletedDirsList()) { + Assertions.assertEquals(1, deletedDir.getKeyInfosList().size()); + } + + for (OzoneManagerProtocolProtos.SnapshotMoveKeyInfos deletedKey : + omRequest.getSnapshotMoveTableKeysRequest().getDeletedKeysList()) { + Assertions.assertNotEquals(0, deletedKey.getKeyInfosList().size()); + } + + for (HddsProtos.KeyValue renameKey : omRequest.getSnapshotMoveTableKeysRequest().getRenamedKeysList()) { + Assertions.assertTrue(renameKey.hasKey() && renameKey.hasValue()); + } + + } + + @Test + public void testPreExecuteWithInvalidRenamePrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List<Pair<String, String>> renameKeys = + Stream.of(getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1), + getRenameKeys(invalidVolumeName, invalidBucketName, 0, 10, snapshotName2)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest =
moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + @Test + public void testValidateAndUpdateCache() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List<Pair<String, List<OmKeyInfo>>> deletedKeys = getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0); + List<Pair<String, List<OmKeyInfo>>> deletedDirs = getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1); + List<Pair<String, String>> renameKeys = getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, deletedDirs, renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + // perform preExecute. + omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest( + omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + OMClientResponse omClientResponse = omSnapshotMoveTableKeysRequest.validateAndUpdateCache(getOzoneManager(), 1); + Assertions.assertTrue(omClientResponse.getOMResponse().getSuccess()); + Assertions.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateDeletedKey() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List<Pair<String, List<OmKeyInfo>>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateDeletedDir() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List<Pair<String, List<OmKeyInfo>>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), deletedDirs, Collections.emptyList()); + OMSnapshotMoveTableKeysRequest
omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateRenameKey() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List> renameKeys = + Stream.of(getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1), + getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 8edd096e766..1c44decdfda 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -19,44 +19,32 @@ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import com.google.protobuf.ByteString; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; @@ -68,10 +56,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -79,49 +65,16 @@ /** * Tests OMSnapshotPurgeRequest class. */ -public class TestOMSnapshotPurgeRequestAndResponse { - private List checkpointPaths = new ArrayList<>(); - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OMMetadataManager omMetadataManager; - private OmSnapshotManager omSnapshotManager; - private AuditLogger auditLogger; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotPurgeRequestAndResponse extends TestSnapshotRequestAndResponse { + private final List checkpointPaths = new ArrayList<>(); private String keyName; + public TestOMSnapshotPurgeRequestAndResponse() { + super(true); + } + @BeforeEach - void setup(@TempDir File testDir) throws Exception { - ozoneManager = mock(OzoneManager.class); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - testDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); - when(ozoneManager.isAdmin(any())).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - - ReferenceCounted rcOmMetadataReader = - mock(ReferenceCounted.class); - when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); - omSnapshotManager = new OmSnapshotManager(ozoneManager); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); + public void setup() throws Exception { keyName = UUID.randomUUID().toString(); } @@ -132,17 +85,14 @@ private List 
createSnapshots(int numSnapshotKeys) throws Exception { Random random = new Random(); - // Add volume, bucket and key entries to OM DB. - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); // Create Snapshot and CheckpointDir List purgeSnapshots = new ArrayList<>(numSnapshotKeys); for (int i = 1; i <= numSnapshotKeys; i++) { String snapshotName = keyName + "-" + random.nextLong(); createSnapshotCheckpoint(snapshotName); - purgeSnapshots.add(SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName)); + purgeSnapshots.add(SnapshotInfo.getTableKey(getVolumeName(), + getBucketName(), snapshotName)); } return purgeSnapshots; @@ -172,39 +122,7 @@ private OMRequest createPurgeKeysRequest(List purgeSnapshotKeys) { * Create snapshot and checkpoint directory. */ private void createSnapshotCheckpoint(String snapshotName) throws Exception { - createSnapshotCheckpoint(volumeName, bucketName, snapshotName); - } - - private void createSnapshotCheckpoint(String volume, - String bucket, - String snapshotName) throws Exception { - OMRequest omRequest = OMRequestTestUtils - .createSnapshotRequest(volume, bucket, snapshotName); - // Pre-Execute OMSnapshotCreateRequest. - OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); - - // validateAndUpdateCache OMSnapshotCreateResponse. - OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); - // Add to batch and commit to DB. - try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - } - - String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); - SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); - assertNotNull(snapshotInfo); - - RDBStore store = (RDBStore) omMetadataManager.getStore(); - String checkpointPrefix = store.getDbLocation().getName(); - Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); - // Check the DB is still there - assertTrue(Files.exists(snapshotDirPath)); - checkpointPaths.add(snapshotDirPath); + checkpointPaths.add(createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName)); } private OMSnapshotPurgeRequest preExecute(OMRequest originalOmRequest) @@ -212,7 +130,7 @@ private OMSnapshotPurgeRequest preExecute(OMRequest originalOmRequest) OMSnapshotPurgeRequest omSnapshotPurgeRequest = new OMSnapshotPurgeRequest(originalOmRequest); OMRequest modifiedOmRequest = omSnapshotPurgeRequest - .preExecute(ozoneManager); + .preExecute(getOzoneManager()); return new OMSnapshotPurgeRequest(modifiedOmRequest); } @@ -224,48 +142,48 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest) // validateAndUpdateCache for OMSnapshotPurgeRequest. OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); // Commit to DB. 
- try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); } } @Test public void testValidateAndUpdateCache() throws Exception { - long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); - long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + long initialSnapshotPurgeCount = getOmMetrics().getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = getOmMetrics().getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); for (String snapshotTableKey: snapshotDbKeysToPurge) { - assertNull(omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey)); } - try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); } // Check if the entries are deleted. - assertTrue(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertTrue(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); // Check if all the checkpoints are cleared. 
for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } - assertEquals(initialSnapshotPurgeCount + 1, omMetrics.getNumSnapshotPurges()); - assertEquals(initialSnapshotPurgeFailCount, omMetrics.getNumSnapshotPurgeFails()); + assertEquals(initialSnapshotPurgeCount + 1, getOmMetrics().getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount, getOmMetrics().getNumSnapshotPurgeFails()); } /** @@ -273,8 +191,8 @@ public void testValidateAndUpdateCache() throws Exception { */ @Test public void testValidateAndUpdateCacheFailure() throws Exception { - long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); - long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + long initialSnapshotPurgeCount = getOmMetrics().getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = getOmMetrics().getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); @@ -283,17 +201,17 @@ public void testValidateAndUpdateCacheFailure() throws Exception { when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); - when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager); OMRequest snapshotPurgeRequest = createPurgeKeysRequest(snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); assertEquals(INTERNAL_ERROR, omSnapshotPurgeResponse.getOMResponse().getStatus()); - assertEquals(initialSnapshotPurgeCount, omMetrics.getNumSnapshotPurges()); - assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails()); + assertEquals(initialSnapshotPurgeCount, getOmMetrics().getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount + 1, getOmMetrics().getNumSnapshotPurgeFails()); } // TODO: clean up: Do we this test after @@ -306,7 +224,7 @@ public void testSnapshotChainCleanup(int index) throws Exception { // Before purge, check snapshot chain OmMetadataManagerImpl metadataManager = - (OmMetadataManagerImpl) omMetadataManager; + (OmMetadataManagerImpl) getOmMetadataManager(); SnapshotChainManager chainManager = metadataManager .getSnapshotChainManager(); SnapshotInfo snapInfo = metadataManager.getSnapshotInfoTable() @@ -340,8 +258,8 @@ public void testSnapshotChainCleanup(int index) throws Exception { snapInfo.getSnapshotId()); } - long rowsInTableBeforePurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long rowsInTableBeforePurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); // Purge Snapshot of the given index. 
List<String> toPurgeList = Collections.singletonList(snapShotToPurge); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( @@ -364,8 +282,8 @@ public void testSnapshotChainCleanup(int index) throws Exception { .getGlobalPreviousSnapshotId(), prevGlobalSnapId); } - assertNotEquals(rowsInTableBeforePurge, omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable())); + assertNotEquals(rowsInTableBeforePurge, getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable())); } private static Stream<Arguments> snapshotPurgeCases() { @@ -419,14 +337,14 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( int toIndex, boolean createInBucketOrder) throws Exception { SnapshotChainManager chainManager = - ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager(); + ((OmMetadataManagerImpl) getOmMetadataManager()).getSnapshotChainManager(); int totalKeys = numberOfBuckets * numberOfKeysPerBucket; List<String> buckets = new ArrayList<>(); for (int i = 0; i < numberOfBuckets; i++) { String bucketNameLocal = "bucket-" + UUID.randomUUID(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketNameLocal, - omMetadataManager); + OMRequestTestUtils.addVolumeAndBucketToDB(getVolumeName(), bucketNameLocal, + getOmMetadataManager()); buckets.add(bucketNameLocal); } @@ -437,26 +355,43 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( int bucketIndex = createInBucketOrder ? i : j; String bucket = buckets.get(bucketIndex % numberOfBuckets); String snapshotName = UUID.randomUUID().toString(); - createSnapshotCheckpoint(volumeName, bucket, snapshotName); + createSnapshotCheckpoint(getVolumeName(), bucket, snapshotName); String snapshotTableKey = - SnapshotInfo.getTableKey(volumeName, bucket, snapshotName); + SnapshotInfo.getTableKey(getVolumeName(), bucket, snapshotName); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey); snapshotInfoList.add(snapshotInfo); } } - long numberOfSnapshotBeforePurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long numberOfSnapshotBeforePurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); assertEquals(totalKeys, numberOfSnapshotBeforePurge); assertEquals(totalKeys, chainManager.getGlobalSnapshotChain().size()); - - validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoList); - + Map<UUID, ByteString> expectedTransactionInfos = new HashMap<>(); + // Ratis transaction uses term index 1 while creating snapshot. + ByteString expectedLastTransactionVal = TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)) + .toByteString(); + for (SnapshotInfo snapshotInfo : snapshotInfoList) { + expectedTransactionInfos.put(snapshotInfo.getSnapshotId(), expectedLastTransactionVal); + } + validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoList, expectedTransactionInfos); + // Ratis transaction uses term index 200 while purging snapshot. + expectedLastTransactionVal = TransactionInfo.valueOf(TransactionInfo.getTermIndex(200L)) + .toByteString(); List<String> purgeSnapshotKeys = new ArrayList<>(); for (int i = fromIndex; i <= toIndex; i++) { SnapshotInfo purgeSnapshotInfo = snapshotInfoList.get(i); - String purgeSnapshotKey = SnapshotInfo.getTableKey(volumeName, + UUID snapId = purgeSnapshotInfo.getSnapshotId(); + // expecting nextPathSnapshot & nextGlobalSnapshot in chain get updated.
+ if (chainManager.hasNextGlobalSnapshot(snapId)) { + expectedTransactionInfos.put(chainManager.nextGlobalSnapshot(snapId), expectedLastTransactionVal); + } + if (chainManager.hasNextPathSnapshot(purgeSnapshotInfo.getSnapshotPath(), snapId)) { + expectedTransactionInfos.put(chainManager.nextPathSnapshot(purgeSnapshotInfo.getSnapshotPath(), snapId), + expectedLastTransactionVal); + } + String purgeSnapshotKey = SnapshotInfo.getTableKey(getVolumeName(), purgeSnapshotInfo.getBucketName(), purgeSnapshotInfo.getName()); purgeSnapshotKeys.add(purgeSnapshotKey); @@ -469,34 +404,34 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( for (int i = 0; i < totalKeys; i++) { if (i < fromIndex || i > toIndex) { SnapshotInfo info = snapshotInfoList.get(i); - String snapshotKey = SnapshotInfo.getTableKey(volumeName, + String snapshotKey = SnapshotInfo.getTableKey(getVolumeName(), info.getBucketName(), info.getName()); snapshotInfoListAfterPurge.add( - omMetadataManager.getSnapshotInfoTable().get(snapshotKey)); + getOmMetadataManager().getSnapshotInfoTable().get(snapshotKey)); } } long expectNumberOfSnapshotAfterPurge = totalKeys - (toIndex - fromIndex + 1); - long actualNumberOfSnapshotAfterPurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long actualNumberOfSnapshotAfterPurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); assertEquals(expectNumberOfSnapshotAfterPurge, actualNumberOfSnapshotAfterPurge); assertEquals(expectNumberOfSnapshotAfterPurge, chainManager .getGlobalSnapshotChain().size()); - validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain( - snapshotInfoListAfterPurge); + validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain(snapshotInfoListAfterPurge, expectedTransactionInfos); } private void validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain( - List<SnapshotInfo> snapshotInfoList - ) throws IOException { + List<SnapshotInfo> snapshotInfoList, Map<UUID, ByteString> expectedTransactionInfos) throws IOException { if (snapshotInfoList.isEmpty()) { return; } - + for (SnapshotInfo snapshotInfo : snapshotInfoList) { + assertEquals(snapshotInfo.getLastTransactionInfo(), expectedTransactionInfos.get(snapshotInfo.getSnapshotId())); + } OmMetadataManagerImpl metadataManager = - (OmMetadataManagerImpl) omMetadataManager; + (OmMetadataManagerImpl) getOmMetadataManager(); SnapshotChainManager chainManager = metadataManager .getSnapshotChainManager(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java index ab2bac1bd0e..8059c3ce501 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java @@ -16,36 +16,28 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import
org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; @@ -62,75 +54,19 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * Tests OMSnapshotRenameRequest class, which handles RenameSnapshot request. 
*/ -public class TestOMSnapshotRenameRequest { - - @TempDir - private File anotherTempDir; - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - private BatchOperation batchOperation; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotRenameRequest extends TestSnapshotRequestAndResponse { private String snapshotName1; private String snapshotName2; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - anotherTempDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - anotherTempDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); - OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName1 = UUID.randomUUID().toString(); snapshotName2 = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - } - - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - if (batchOperation != null) { - batchOperation.close(); - } } @ValueSource(strings = { @@ -143,14 +79,38 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String toSnapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); String currentSnapshotName = "current"; - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, currentSnapshotName, toSnapshotName); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), currentSnapshotName, toSnapshotName); doPreExecute(omRequest); } + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. 
+ "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecuteWithLinkedBucket(String toSnapshotName) throws Exception { + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + String resolvedBucketName = getBucketName() + "1"; + String resolvedVolumeName = getVolumeName() + "1"; + when(getOzoneManager().resolveBucketLink(any(Pair.class), any(OMClientRequest.class))) + .thenAnswer(i -> new ResolvedBucket(i.getArgument(0), Pair.of(resolvedVolumeName, resolvedBucketName), + "owner", BucketLayout.FILE_SYSTEM_OPTIMIZED)); + String currentSnapshotName = "current"; + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), currentSnapshotName, toSnapshotName); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + assertEquals(resolvedVolumeName, omSnapshotRenameRequest.getOmRequest().getRenameSnapshotRequest().getVolumeName()); + assertEquals(resolvedBucketName, omSnapshotRenameRequest.getOmRequest().getRenameSnapshotRequest().getBucketName()); + } + @ValueSource(strings = { // ? is not allowed in snapshot name. "a?b", @@ -167,10 +127,10 @@ public void testPreExecute(String toSnapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String toSnapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); String currentSnapshotName = "current"; - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, currentSnapshotName, toSnapshotName); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), currentSnapshotName, toSnapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage().contains("Invalid snapshot name: " + toSnapshotName)); @@ -179,8 +139,8 @@ public void testPreExecuteFailure(String toSnapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. 
- OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, snapshotName1, snapshotName2); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1, snapshotName2); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -190,39 +150,39 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, snapshotName1, snapshotName2); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); - String key = getTableKey(volumeName, bucketName, snapshotName1); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(), getBucketName()); // Add a 1000-byte key to the bucket OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); addKeyToTable(key1); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get( bucketKey); long bucketDataSize = key1.getDataSize(); long bucketUsedBytes = omBucketInfo.getUsedBytes(); assertEquals(key1.getReplicatedSize(), bucketUsedBytes); // Value in cache should be null as of now. - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Add key to cache. - SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(), getBucketName(), snapshotName1, UUID.randomUUID(), Time.now()); snapshotInfo.setReferencedSize(1000L); snapshotInfo.setReferencedReplicatedSize(3 * 1000L); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry( + getOmMetadataManager().getSnapshotInfoTable().addCacheEntry( new CacheKey<>(key), CacheValue.get(1L, snapshotInfo)); // Run validateAndUpdateCache. 
OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 2L); assertNotNull(omClientResponse.getOMResponse()); @@ -244,56 +204,56 @@ public void testValidateAndUpdateCache() throws Exception { SnapshotInfo snapshotInfoOldProto = getFromProtobuf(snapshotInfoProto); - String key2 = getTableKey(volumeName, bucketName, snapshotName2); + String key2 = getTableKey(getVolumeName(), getBucketName(), snapshotName2); // Get value from cache SnapshotInfo snapshotInfoNewInCache = - omMetadataManager.getSnapshotInfoTable().get(key2); + getOmMetadataManager().getSnapshotInfoTable().get(key2); assertNotNull(snapshotInfoNewInCache); assertEquals(snapshotInfoOldProto, snapshotInfoNewInCache); assertEquals(snapshotInfo.getSnapshotId(), snapshotInfoNewInCache.getSnapshotId()); SnapshotInfo snapshotInfoOldInCache = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNull(snapshotInfoOldInCache); } @Test public void testEntryExists() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); - String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + String keyNameOld = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String keyNameNew = getTableKey(getVolumeName(), getBucketName(), snapshotName2); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // First make sure we have two snapshots. OzoneManagerProtocolProtos.OMRequest createOmRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName1); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, getOzoneManager()); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); createOmRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName2); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName2); omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, getOzoneManager()); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 2); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // Now try renaming and get an error. 
OzoneManagerProtocolProtos.OMRequest omRequest = - renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + renameSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 3); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getRenameSnapshotResponse()); @@ -303,24 +263,24 @@ public void testEntryExists() throws Exception { @Test public void testEntryNotFound() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); - String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + String keyNameOld = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String keyNameNew = getTableKey(getVolumeName(), getBucketName(), snapshotName2); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // Now try renaming and get an error. 
OzoneManagerProtocolProtos.OMRequest omRequest = - renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + renameSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 3); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getRenameSnapshotResponse()); @@ -330,7 +290,7 @@ public void testEntryNotFound() throws Exception { private OMSnapshotRenameRequest doPreExecute( OzoneManagerProtocolProtos.OMRequest originalRequest) throws Exception { - return doPreExecute(originalRequest, ozoneManager); + return doPreExecute(originalRequest, getOzoneManager()); } public static OMSnapshotRenameRequest doPreExecute( @@ -344,15 +304,15 @@ public static OMSnapshotRenameRequest doPreExecute( } private OmKeyInfo addKey(String keyName, long objectId) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, - omMetadataManager); - return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + getOmMetadataManager()); + return getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index b5bfc2714b0..380922f9e22 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -18,32 +18,23 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotSetPropertyResponse; -import 
org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.io.TempDir; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -60,37 +51,13 @@ * Tests TestOMSnapshotSetPropertyRequest * TestOMSnapshotSetPropertyResponse class. */ -public class TestOMSnapshotSetPropertyRequestAndResponse { - private BatchOperation batchOperation; - private OzoneManager ozoneManager; - private OMMetadataManager omMetadataManager; - private OMMetrics omMetrics; - private String volumeName; - private String bucketName; +public class TestOMSnapshotSetPropertyRequestAndResponse extends TestSnapshotRequestAndResponse { private String snapName; private long exclusiveSize; private long exclusiveSizeAfterRepl; @BeforeEach - void setup(@TempDir File testDir) throws Exception { - omMetrics = OMMetrics.create(); - ozoneManager = mock(OzoneManager.class); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - testDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); + void setup() { snapName = UUID.randomUUID().toString(); exclusiveSize = 2000L; exclusiveSizeAfterRepl = 6000L; @@ -98,11 +65,11 @@ void setup(@TempDir File testDir) throws Exception { @Test public void testValidateAndUpdateCache() throws IOException { - long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); - long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + long initialSnapshotSetPropertyCount = getOmMetrics().getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = getOmMetrics().getNumSnapshotSetPropertyFails(); createSnapshotDataForTest(); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); @@ -111,28 +78,27 @@ public void testValidateAndUpdateCache() throws IOException { OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(request); OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest - .preExecute(ozoneManager); + .preExecute(getOzoneManager()); omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); // Validate and Update Cache OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = 
(OMSnapshotSetPropertyResponse) omSnapshotSetPropertyRequest - .validateAndUpdateCache(ozoneManager, 200L); + .validateAndUpdateCache(getOzoneManager(), 200L); // Commit to DB. - batchOperation = omMetadataManager.getStore().initBatchOperation(); - omSnapshotSetPropertyResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omSnapshotSetPropertyResponse.checkAndUpdateDB(getOmMetadataManager(), + getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } assertEquals(initialSnapshotSetPropertyCount + snapshotUpdateSizeRequests.size(), - omMetrics.getNumSnapshotSetProperties()); - assertEquals(initialSnapshotSetPropertyFailCount, omMetrics.getNumSnapshotSetPropertyFails()); + getOmMetrics().getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount, getOmMetrics().getNumSnapshotSetPropertyFails()); // Check if the exclusive size is set. try (TableIterator> - iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { + iterator = getOmMetadataManager().getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); assertCacheValues(snapshotEntry.getKey()); @@ -149,11 +115,11 @@ public void testValidateAndUpdateCache() throws IOException { */ @Test public void testValidateAndUpdateCacheFailure() throws IOException { - long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); - long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + long initialSnapshotSetPropertyCount = getOmMetrics().getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = getOmMetrics().getNumSnapshotSetPropertyFails(); createSnapshotDataForTest(); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); @@ -161,27 +127,27 @@ public void testValidateAndUpdateCacheFailure() throws IOException { when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); - when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager); for (OMRequest omRequest: snapshotUpdateSizeRequests) { OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(omRequest); - OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(ozoneManager); + OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(getOzoneManager()); omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); // Validate and Update Cache OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) - omSnapshotSetPropertyRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotSetPropertyRequest.validateAndUpdateCache(getOzoneManager(), 200L); assertEquals(INTERNAL_ERROR, omSnapshotSetPropertyResponse.getOMResponse().getStatus()); } - assertEquals(initialSnapshotSetPropertyCount, omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyCount, getOmMetrics().getNumSnapshotSetProperties()); 
assertEquals(initialSnapshotSetPropertyFailCount + snapshotUpdateSizeRequests.size(), - omMetrics.getNumSnapshotSetPropertyFails()); + getOmMetrics().getNumSnapshotSetPropertyFails()); } private void assertCacheValues(String dbKey) { - CacheValue cacheValue = omMetadataManager + CacheValue cacheValue = getOmMetadataManager() .getSnapshotInfoTable() .getCacheValue(new CacheKey<>(dbKey)); assertEquals(exclusiveSize, cacheValue.getCacheValue().getExclusiveSize()); @@ -193,7 +159,7 @@ private List createSnapshotUpdateSizeRequest() throws IOException { List omRequests = new ArrayList<>(); try (TableIterator> - iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { + iterator = getOmMetadataManager().getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { String snapDbKey = iterator.next().getKey(); SnapshotSize snapshotSize = SnapshotSize.newBuilder() @@ -220,8 +186,8 @@ private List createSnapshotUpdateSizeRequest() private void createSnapshotDataForTest() throws IOException { // Create 10 Snapshots for (int i = 0; i < 10; i++) { - OMRequestTestUtils.addSnapshotToTableCache(volumeName, bucketName, - snapName + i, omMetadataManager); + OMRequestTestUtils.addSnapshotToTableCache(getVolumeName(), getBucketName(), + snapName + i, getOmMetadataManager()); } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java index 14f1438b78b..5e0d2db17c9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java @@ -81,6 +81,7 @@ public void setup() throws Exception { auditLogger = mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); } @AfterEach diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java new file mode 100644 index 00000000000..26daacf6f28 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; + +/** + * Test delete object tagging response. + */ +public class TestS3DeleteObjectTaggingResponse extends TestOMKeyResponse { + + @Test + public void testAddToBatch() throws Exception { + OzoneManagerProtocolProtos.OMResponse omResponse = + OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteObjectTaggingResponse( + OzoneManagerProtocolProtos.DeleteObjectTaggingResponse.getDefaultInstance()) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteObjectTagging) + .build(); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + + String ozoneKey = addKeyToTable(tags); + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertEquals(2, omKeyInfo.getTags().size()); + + omKeyInfo.getTags().clear(); + + S3DeleteObjectTaggingResponse deleteObjectTaggingResponse = getDeleteObjectTaggingResponse(omKeyInfo, omResponse); + + deleteObjectTaggingResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
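+ // addToDBBatch only stages the cleared-tag OmKeyInfo in the BatchOperation; nothing reaches
+ // the key table until the commit below, after which the re-read key should be a different
+ // instance (assertNotSame) with an empty tag map.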
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + OmKeyInfo updatedOmKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotSame(omKeyInfo, updatedOmKeyInfo); + assertNotNull(updatedOmKeyInfo); + assertEquals(0, updatedOmKeyInfo.getTags().size()); + } + + protected String addKeyToTable(Map tags) throws Exception { + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addKeyToTable(false, false, omKeyInfo, + clientID, 1L, omMetadataManager); + return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + } + + protected S3DeleteObjectTaggingResponse getDeleteObjectTaggingResponse(OmKeyInfo omKeyInfo, + OzoneManagerProtocolProtos.OMResponse omResponse) + throws IOException { + return new S3DeleteObjectTaggingResponse(omResponse, omKeyInfo); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..923ff441e98 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + +/** + * Test delete object tagging response for FSO bucket. + */ +public class TestS3DeleteObjectTaggingResponseWithFSO extends TestS3DeleteObjectTaggingResponse { + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected String addKeyToTable(Map tags) throws Exception { + // Add volume, bucket and key entries to OM DB. + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, "", omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1) + .setParentObjectID(parentId) + .setUpdateID(1L) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + keyName, omKeyInfo, -1, 50, omMetadataManager); + return omMetadataManager.getOzonePathKey( + omMetadataManager.getVolumeId(volumeName), + omMetadataManager.getBucketId(volumeName, bucketName), + omKeyInfo.getParentObjectID(), keyName); + } + + @Override + protected S3DeleteObjectTaggingResponse getDeleteObjectTaggingResponse(OmKeyInfo omKeyInfo, + OzoneManagerProtocolProtos.OMResponse omResponse) + throws IOException { + return new S3DeleteObjectTaggingResponseWithFSO(omResponse, omKeyInfo, + omMetadataManager.getVolumeId(volumeName), omBucketInfo.getObjectID()); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponse.java new file mode 100644 index 00000000000..af6565a447f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; + +/** + * Test put object tagging response. + */ +public class TestS3PutObjectTaggingResponse extends TestOMKeyResponse { + + @Test + public void testAddToDBBatch() throws Exception { + OzoneManagerProtocolProtos.OMResponse omResponse = + OzoneManagerProtocolProtos.OMResponse.newBuilder().setPutObjectTaggingResponse( + OzoneManagerProtocolProtos.PutObjectTaggingResponse.getDefaultInstance()) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.PutObjectTagging) + .build(); + + String ozoneKey = addKeyToTable(); + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertEquals(0, omKeyInfo.getTags().size()); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + + omKeyInfo.setTags(tags); + + S3PutObjectTaggingResponse putObjectTaggingResponse = getPutObjectTaggingResponse(omKeyInfo, omResponse); + + putObjectTaggingResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
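+ // The new tag map exists only on the in-memory OmKeyInfo and is staged via addToDBBatch;
+ // committing the batch persists it, so the follow-up get() should return the tags from the
+ // key table on a different instance (hence the assertNotSame check).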
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + OmKeyInfo updatedOmKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotSame(omKeyInfo, updatedOmKeyInfo); + assertNotNull(updatedOmKeyInfo); + assertEquals(tags.size(), updatedOmKeyInfo.getTags().size()); + } + + protected String addKeyToTable() throws Exception { + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), + omMetadataManager); + + return omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + } + + protected S3PutObjectTaggingResponse getPutObjectTaggingResponse(OmKeyInfo omKeyInfo, + OzoneManagerProtocolProtos.OMResponse omResponse) + throws IOException { + return new S3PutObjectTaggingResponse(omResponse, omKeyInfo); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..1c93a527711 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponseWithFSO.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + +/** + * Test put object tagging response for FSO bucket. + */ +public class TestS3PutObjectTaggingResponseWithFSO extends TestS3PutObjectTaggingResponse { + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected String addKeyToTable() throws Exception { + // Add volume, bucket and key entries to OM DB. + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, "", omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1) + .setParentObjectID(parentId) + .setUpdateID(1L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + keyName, omKeyInfo, -1, 50, omMetadataManager); + return omMetadataManager.getOzonePathKey( + omMetadataManager.getVolumeId(volumeName), + omMetadataManager.getBucketId(volumeName, bucketName), + omKeyInfo.getParentObjectID(), keyName); + } + + @Override + protected S3PutObjectTaggingResponse getPutObjectTaggingResponse(OmKeyInfo omKeyInfo, OMResponse omResponse) + throws IOException { + return new S3PutObjectTaggingResponseWithFSO(omResponse, omKeyInfo, + omMetadataManager.getVolumeId(volumeName), omBucketInfo.getObjectID()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index 7f74f3d17ec..a370c20ad1b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -27,6 +27,7 @@ import java.util.UUID; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -98,7 +99,8 @@ public void testAddToDBBatch(int numberOfKeys) throws Exception { snapshotName, snapshotId, Time.now()); - + snapshotInfo.setLastTransactionInfo( + TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); // confirm table is empty assertEquals(0, omMetadataManager .countRowsInTable(omMetadataManager.getSnapshotInfoTable())); diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java new file mode 100644 index 00000000000..d2e2d94ec73 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.om.response.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** + * Test class to test OMSnapshotMoveTableKeysResponse. 
+ */ +public class TestOMSnapshotMoveTableKeysResponse extends TestSnapshotRequestAndResponse { + + private String snapshotName1; + private String snapshotName2; + private SnapshotInfo snapshotInfo1; + private SnapshotInfo snapshotInfo2; + + @BeforeEach + public void setup() throws Exception { + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + } + + public TestOMSnapshotMoveTableKeysResponse() { + super(true); + } + + private void createSnapshots(boolean createSecondSnapshot) throws Exception { + addDataToTable(getOmMetadataManager().getSnapshotRenamedTable(), getRenameKeys(getVolumeName(), getBucketName(), 0, + 10, snapshotName1)); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 0, + 10, 10, 0).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedDirTable(), + getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1).stream() + .map(pair -> Pair.of(pair.getKey(), pair.getRight().get(0))).collect(Collectors.toList())); + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName1); + snapshotInfo1 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName1); + addDataToTable(getOmMetadataManager().getSnapshotRenamedTable(), getRenameKeys(getVolumeName(), getBucketName(), 5, + 15, snapshotName2)); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 5, + 8, 10, 10).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 8, + 15, 10, 0).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedDirTable(), + getDeletedDirKeys(getVolumeName(), getBucketName(), 5, 15, 1).stream() + .map(pair -> Pair.of(pair.getKey(), pair.getRight().get(0))).collect(Collectors.toList())); + if (createSecondSnapshot) { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName2); + snapshotInfo2 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName2); + } + } + + private void addDataToTable(Table table, List> vals) throws IOException { + for (Pair pair : vals) { + table.put(pair.getKey(), pair.getValue()); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testMoveTableKeysToNextSnapshot(boolean nextSnapshotExists) throws Exception { + createSnapshots(nextSnapshotExists); + + try (ReferenceCounted snapshot1 = getOmSnapshotManager().getSnapshot(getVolumeName(), getBucketName(), + snapshotName1); + ReferenceCounted snapshot2 = nextSnapshotExists ? 
getOmSnapshotManager().getSnapshot( + getVolumeName(), getBucketName(), snapshotName2) : null) { + OmSnapshot snapshot = snapshot1.get(); + List deletedTable = new ArrayList<>(); + List deletedDirTable = new ArrayList<>(); + List renamedTable = new ArrayList<>(); + Map renameEntries = new HashMap<>(); + snapshot.getMetadataManager().getDeletedTable().iterator() + .forEachRemaining(entry -> { + try { + deletedTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey()) + .addAllKeyInfos(entry.getValue().getOmKeyInfoList().stream().map(omKeyInfo -> omKeyInfo.getProtobuf( + ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())).build()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + snapshot.getMetadataManager().getDeletedDirTable().iterator() + .forEachRemaining(entry -> { + try { + deletedDirTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey()) + .addKeyInfos(entry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + snapshot.getMetadataManager().getSnapshotRenamedTable().iterator().forEachRemaining(entry -> { + try { + renamedTable.add(HddsProtos.KeyValue.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build()); + renameEntries.put(entry.getKey(), entry.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + OMSnapshotMoveTableKeysResponse response = new OMSnapshotMoveTableKeysResponse( + OzoneManagerProtocolProtos.OMResponse.newBuilder().setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.SnapshotMoveTableKeys).build(), + snapshotInfo1, nextSnapshotExists ? snapshotInfo2 : null, deletedTable, deletedDirTable, renamedTable); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + response.addToDBBatch(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); + } + Assertions.assertTrue(snapshot.getMetadataManager().getDeletedTable().isEmpty()); + Assertions.assertTrue(snapshot.getMetadataManager().getDeletedDirTable().isEmpty()); + Assertions.assertTrue(snapshot.getMetadataManager().getSnapshotRenamedTable().isEmpty()); + OMMetadataManager nextMetadataManager = + nextSnapshotExists ? snapshot2.get().getMetadataManager() : getOmMetadataManager(); + AtomicInteger count = new AtomicInteger(); + nextMetadataManager.getDeletedTable().iterator().forEachRemaining(entry -> { + count.getAndIncrement(); + try { + int maxCount = count.get() >= 6 && count.get() <= 8 ? 
20 : 10; + Assertions.assertEquals(maxCount, entry.getValue().getOmKeyInfoList().size()); + List versions = entry.getValue().getOmKeyInfoList().stream().map(OmKeyInfo::getKeyLocationVersions) + .map(omKeyInfo -> omKeyInfo.get(0).getVersion()).collect(Collectors.toList()); + List expectedVersions = new ArrayList<>(); + if (maxCount == 20) { + expectedVersions.addAll(LongStream.range(10, 20).boxed().collect(Collectors.toList())); + } + expectedVersions.addAll(LongStream.range(0, 10).boxed().collect(Collectors.toList())); + Assertions.assertEquals(expectedVersions, versions); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + Assertions.assertEquals(15, count.get()); + count.set(0); + + nextMetadataManager.getDeletedDirTable().iterator().forEachRemaining(entry -> count.getAndIncrement()); + Assertions.assertEquals(15, count.get()); + count.set(0); + nextMetadataManager.getSnapshotRenamedTable().iterator().forEachRemaining(entry -> { + try { + String expectedValue = renameEntries.getOrDefault(entry.getKey(), entry.getValue()); + Assertions.assertEquals(expectedValue, entry.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + count.getAndIncrement(); + }); + Assertions.assertEquals(15, count.get()); + } + + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java index 9d8de4bbb20..075dad5ee03 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -31,13 +29,8 @@ import org.apache.hadoop.ozone.storage.proto. OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,33 +38,12 @@ /** * This class tests OMVolumeCreateResponse. 
*/ -public class TestOMVolumeCreateResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } +public class TestOMVolumeCreateResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -79,10 +51,10 @@ public void testAddToDBBatch() throws Exception { .addVolumeNames(volumeName).build(); OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setSuccess(true) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) .build(); OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() @@ -109,7 +81,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS) @@ -125,6 +98,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java index 70dd23a7b04..e4b93881137 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -30,14 +28,9 @@ .OMResponse; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import 
org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import java.util.UUID; -import java.nio.file.Path; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNull; @@ -45,33 +38,12 @@ /** * This class tests OMVolumeCreateResponse. */ -public class TestOMVolumeDeleteResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } +public class TestOMVolumeDeleteResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -95,7 +67,7 @@ public void testAddToDBBatch() throws Exception { // As we are deleting updated volume list should be empty. PersistedUserVolumeInfo updatedVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(1).setUpdateID(1).build(); + .setObjectID(1).setUpdateID(1).build(); OMVolumeDeleteResponse omVolumeDeleteResponse = new OMVolumeDeleteResponse(omResponse, volumeName, userName, updatedVolumeList); @@ -107,7 +79,7 @@ public void testAddToDBBatch() throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); assertNull(omMetadataManager.getVolumeTable().get( - omMetadataManager.getVolumeKey(volumeName))); + omMetadataManager.getVolumeKey(volumeName))); assertNull(omMetadataManager.getUserTable().get( omMetadataManager.getUserKey(userName))); @@ -115,7 +87,8 @@ public void testAddToDBBatch() throws Exception { @Test public void testAddToDBBatchNoOp() { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -127,5 +100,4 @@ public void testAddToDBBatchNoOp() { omResponse); assertDoesNotThrow(() -> omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation)); } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java new file mode 100644 index 00000000000..7edbaedf2dd --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; + +import java.nio.file.Path; + +/** + * Base test class for OM volume response. + */ +public class TestOMVolumeResponse { + @TempDir + private Path folder; + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @BeforeEach + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.toAbsolutePath().toString()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @AfterEach + public void tearDown() { + if (batchOperation != null) { + batchOperation.close(); + } + } + + protected OMMetadataManager getOmMetadataManager() { + return omMetadataManager; + } + protected BatchOperation getBatchOperation() { + return batchOperation; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java index aa640067ca4..00da2029c1e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -30,14 +28,9 @@ .OMResponse; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,34 +38,12 @@ /** * This class tests OMVolumeCreateResponse. 
*/ -public class TestOMVolumeSetOwnerResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } - +public class TestOMVolumeSetOwnerResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String oldOwner = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -94,25 +65,24 @@ public void testAddToDBBatch() throws Exception { new OMVolumeCreateResponse(omResponse, omVolumeArgs, volumeList); - String newOwner = "user2"; PersistedUserVolumeInfo newOwnerVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(1) - .setUpdateID(1) - .addVolumeNames(volumeName).build(); + .setObjectID(1) + .setUpdateID(1) + .addVolumeNames(volumeName).build(); PersistedUserVolumeInfo oldOwnerVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(2) - .setUpdateID(2) - .build(); + .setObjectID(2) + .setUpdateID(2) + .build(); OmVolumeArgs newOwnerVolumeArgs = OmVolumeArgs.newBuilder() .setOwnerName(newOwner).setAdminName(newOwner) .setVolume(volumeName).setCreationTime(omVolumeArgs.getCreationTime()) .build(); OMVolumeSetOwnerResponse omVolumeSetOwnerResponse = - new OMVolumeSetOwnerResponse(omResponse, oldOwner, oldOwnerVolumeList, + new OMVolumeSetOwnerResponse(omResponse, oldOwner, oldOwnerVolumeList, newOwnerVolumeList, newOwnerVolumeArgs); omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); @@ -139,7 +109,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -155,6 +126,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java index fbc8e3c944d..c33e9d174a9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -29,14 +27,9 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -44,34 +37,12 @@ /** * This class tests OMVolumeCreateResponse. */ -public class TestOMVolumeSetQuotaResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } - +public class TestOMVolumeSetQuotaResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; @@ -107,7 +78,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -123,6 +95,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java index 8dcb030d637..681b24b8e42 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java @@ -50,6 +50,10 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION; import static org.assertj.core.api.Assertions.assertThat; import static 
org.junit.jupiter.api.Assertions.assertTrue; @@ -161,4 +165,60 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception { 500, 60000); assertThat(dirDeletingService.getRunCount().get()).isGreaterThanOrEqualTo(1); } + + @Test + public void testDeleteDirectoryFlatDirsHavingNoChilds() throws Exception { + OzoneConfiguration conf = createConfAndInitValues(); + OmTestManagers omTestManagers + = new OmTestManagers(conf); + KeyManager keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + om.getMetadataManager(), BucketLayout.FILE_SYSTEM_OPTIMIZED); + String bucketKey = om.getMetadataManager().getBucketKey(volumeName, bucketName); + OmBucketInfo bucketInfo = om.getMetadataManager().getBucketTable().get(bucketKey); + + int dirCreatesCount = OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT * 2 + 100; + long parentId = 1; + OmDirectoryInfo baseDir = new OmDirectoryInfo.Builder().setName("dir_base") + .setCreationTime(Time.now()).setModificationTime(Time.now()) + .setObjectID(parentId).setParentObjectID(bucketInfo.getObjectID()) + .setUpdateID(0).build(); + OMRequestTestUtils.addDirKeyToDirTable(true, baseDir, volumeName, bucketName, + 1L, om.getMetadataManager()); + for (int i = 0; i < dirCreatesCount; ++i) { + OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder().setName("dir" + i) + .setCreationTime(Time.now()).setModificationTime(Time.now()).setParentObjectID(parentId) + .setObjectID(i + 100).setUpdateID(i).build(); + OMRequestTestUtils.addDirKeyToDirTable(true, dir1, volumeName, bucketName, + 1L, om.getMetadataManager()); + } + + DirectoryDeletingService dirDeletingService = keyManager.getDirDeletingService(); + long[] delDirCnt = new long[2]; + delDirCnt[0] = dirDeletingService.getDeletedDirsCount(); + + OmKeyArgs delArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName).setBucketName(bucketName).setKeyName("dir_base") + .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) + .setDataSize(0).setRecursive(true).build(); + writeClient.deleteKey(delArgs); + int pathDelLimit = conf.getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK, + OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT); + int numThread = conf.getInt(OZONE_THREAD_NUMBER_DIR_DELETION, + OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT); + + // check if difference between each run should not cross the directory deletion limit + // and wait till all dir is removed + GenericTestUtils.waitFor(() -> { + delDirCnt[1] = dirDeletingService.getDeletedDirsCount(); + assertTrue( + delDirCnt[1] - delDirCnt[0] <= ((long) pathDelLimit * numThread), + "base: " + delDirCnt[0] + ", new: " + delDirCnt[1]); + delDirCnt[0] = delDirCnt[1]; + return dirDeletingService.getDeletedDirsCount() >= dirCreatesCount; + }, 500, 300000); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 8163592cfc6..ff6506da034 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -39,13 +39,17 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.KeyManager; +import 
org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.PendingKeysDeletion; import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -57,10 +61,13 @@ import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.io.TempDir; +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -81,12 +88,16 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -132,6 +143,7 @@ private void createConfig(File testDir) { 1, TimeUnit.SECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setQuietMode(false); } @@ -285,6 +297,115 @@ void checkDeletedTableCleanUpForSnapshot() throws Exception { assertEquals(0, rangeKVs.size()); } + /* + * Create key k1 + * Create snap1 + * Rename k1 to k2 + * Delete k2 + * Wait for KeyDeletingService to start processing deleted key k2 + * Create snap2 by making the KeyDeletingService thread wait till snap2 is flushed + * Resume KeyDeletingService thread. + * Read k1 from snap1. 
+ */ + @Test + public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() + throws Exception { + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); + Table deletedTable = + om.getMetadataManager().getDeletedTable(); + Table renameTable = om.getMetadataManager().getSnapshotRenamedTable(); + + // Suspend KeyDeletingService + keyDeletingService.suspend(); + SnapshotDeletingService snapshotDeletingService = om.getKeyManager().getSnapshotDeletingService(); + snapshotDeletingService.suspend(); + GenericTestUtils.waitFor(() -> !keyDeletingService.isRunningOnAOS(), 1000, 10000); + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); + OzoneManager ozoneManager = Mockito.spy(om); + OmSnapshotManager omSnapshotManager = Mockito.spy(om.getOmSnapshotManager()); + KeyManager km = Mockito.spy(new KeyManagerImpl(ozoneManager, ozoneManager.getScmClient(), conf, + om.getPerfMetrics())); + when(ozoneManager.getOmSnapshotManager()).thenAnswer(i -> { + return omSnapshotManager; + }); + KeyDeletingService service = new KeyDeletingService(ozoneManager, scmBlockTestingClient, km, 10000, + 100000, conf, false); + service.shutdown(); + final long initialSnapshotCount = metadataManager.countRowsInTable(snapshotInfoTable); + final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); + final long initialRenameCount = metadataManager.countRowsInTable(renameTable); + // Create Volume and Buckets + createVolumeAndBucket(volumeName, bucketName, false); + OmKeyArgs args = createAndCommitKey(volumeName, bucketName, + "key1", 3); + String snap1 = uniqueObjectName("snap"); + String snap2 = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName, snap1); + KeyInfoWithVolumeContext keyInfo = writeClient.getKeyInfo(args, false); + AtomicLong objectId = new AtomicLong(keyInfo.getKeyInfo().getObjectID()); + renameKey(volumeName, bucketName, "key1", "key2"); + deleteKey(volumeName, bucketName, "key2"); + assertTableRowCount(deletedTable, initialDeletedCount + 1, metadataManager); + assertTableRowCount(renameTable, initialRenameCount + 1, metadataManager); + + String[] deletePathKey = {metadataManager.getOzoneDeletePathKey(objectId.get(), + metadataManager.getOzoneKey(volumeName, + bucketName, "key2"))}; + assertNotNull(deletedTable.get(deletePathKey[0])); + Mockito.doAnswer(i -> { + writeClient.createSnapshot(volumeName, bucketName, snap2); + GenericTestUtils.waitFor(() -> { + try { + SnapshotInfo snapshotInfo = writeClient.getSnapshotInfo(volumeName, bucketName, snap2); + return OmSnapshotManager.areSnapshotChangesFlushedToDB(metadataManager, snapshotInfo); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 100000); + GenericTestUtils.waitFor(() -> { + try { + return renameTable.get(metadataManager.getRenameKey(volumeName, bucketName, objectId.get())) == null; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 10000); + return i.callRealMethod(); + }).when(omSnapshotManager).getSnapshot(ArgumentMatchers.eq(volumeName), ArgumentMatchers.eq(bucketName), + ArgumentMatchers.eq(snap1)); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); + doAnswer(i -> { + PendingKeysDeletion pendingKeysDeletion = (PendingKeysDeletion) i.callRealMethod(); + for (BlockGroup group : pendingKeysDeletion.getKeyBlocksList()) { + Assertions.assertNotEquals(deletePathKey[0], group.getGroupID()); + } + return pendingKeysDeletion; + 
}).when(km).getPendingDeletionKeys(anyInt()); + service.runPeriodicalTaskNow(); + service.runPeriodicalTaskNow(); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); + // Create Key3 + OmKeyArgs args2 = createAndCommitKey(volumeName, bucketName, + "key3", 3); + keyInfo = writeClient.getKeyInfo(args2, false); + objectId.set(keyInfo.getKeyInfo().getObjectID()); + // Rename Key3 to key4 + renameKey(volumeName, bucketName, "key3", "key4"); + // Delete Key4 + deleteKey(volumeName, bucketName, "key4"); + deletePathKey[0] = metadataManager.getOzoneDeletePathKey(objectId.get(), metadataManager.getOzoneKey(volumeName, + bucketName, "key4")); + // Delete snapshot + writeClient.deleteSnapshot(volumeName, bucketName, snap2); + // Run KDS and ensure key4 doesn't get purged since snap2 has not been deleted. + service.runPeriodicalTaskNow(); + writeClient.deleteSnapshot(volumeName, bucketName, snap1); + snapshotDeletingService.resume(); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount, metadataManager); + keyDeletingService.resume(); + } + /* * Create Snap1 * Create 10 keys @@ -396,68 +517,68 @@ void testSnapshotExclusiveSize() throws Exception { final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); final long initialRenamedCount = metadataManager.countRowsInTable(renamedTable); - final String volumeName = getTestName(); - final String bucketName = uniqueObjectName("bucket"); + final String testVolumeName = getTestName(); + final String testBucketName = uniqueObjectName("bucket"); final String keyName = uniqueObjectName("key"); // Create Volume and Buckets - createVolumeAndBucket(volumeName, bucketName, false); + createVolumeAndBucket(testVolumeName, testBucketName, false); // Create 3 keys for (int i = 1; i <= 3; i++) { - createAndCommitKey(volumeName, bucketName, keyName + i, 3); + createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); } assertTableRowCount(keyTable, initialKeyCount + 3, metadataManager); // Create Snapshot1 String snap1 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap1); + writeClient.createSnapshot(testVolumeName, testBucketName, snap1); assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); // Create 2 keys for (int i = 4; i <= 5; i++) { - createAndCommitKey(volumeName, bucketName, keyName + i, 3); + createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); } // Delete a key, rename 2 keys. We will be using this to test // how we handle renamed key for exclusive size calculation. 
- renameKey(volumeName, bucketName, keyName + 1, "renamedKey1"); - renameKey(volumeName, bucketName, keyName + 2, "renamedKey2"); - deleteKey(volumeName, bucketName, keyName + 3); + renameKey(testVolumeName, testBucketName, keyName + 1, "renamedKey1"); + renameKey(testVolumeName, testBucketName, keyName + 2, "renamedKey2"); + deleteKey(testVolumeName, testBucketName, keyName + 3); assertTableRowCount(deletedTable, initialDeletedCount + 1, metadataManager); assertTableRowCount(renamedTable, initialRenamedCount + 2, metadataManager); // Create Snapshot2 String snap2 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap2); + writeClient.createSnapshot(testVolumeName, testBucketName, snap2); assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); // Create 2 keys for (int i = 6; i <= 7; i++) { - createAndCommitKey(volumeName, bucketName, keyName + i, 3); + createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); } - deleteKey(volumeName, bucketName, "renamedKey1"); - deleteKey(volumeName, bucketName, keyName + 4); + deleteKey(testVolumeName, testBucketName, "renamedKey1"); + deleteKey(testVolumeName, testBucketName, keyName + 4); // Do a second rename of already renamedKey2 - renameKey(volumeName, bucketName, "renamedKey2", "renamedKey22"); + renameKey(testVolumeName, testBucketName, "renamedKey2", "renamedKey22"); assertTableRowCount(deletedTable, initialDeletedCount + 2, metadataManager); assertTableRowCount(renamedTable, initialRenamedCount + 1, metadataManager); // Create Snapshot3 String snap3 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap3); + writeClient.createSnapshot(testVolumeName, testBucketName, snap3); // Delete 4 keys - deleteKey(volumeName, bucketName, "renamedKey22"); + deleteKey(testVolumeName, testBucketName, "renamedKey22"); for (int i = 5; i <= 7; i++) { - deleteKey(volumeName, bucketName, keyName + i); + deleteKey(testVolumeName, testBucketName, keyName + i); } // Create Snapshot4 String snap4 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap4); - createAndCommitKey(volumeName, bucketName, uniqueObjectName("key"), 3); + writeClient.createSnapshot(testVolumeName, testBucketName, snap4); + createAndCommitKey(testVolumeName, testBucketName, uniqueObjectName("key"), 3); long prevKdsRunCount = getRunCount(); keyDeletingService.resume(); @@ -468,6 +589,7 @@ void testSnapshotExclusiveSize() throws Exception { .put(snap3, 2000L) .put(snap4, 0L) .build(); + System.out.println(expectedSize); // Let KeyDeletingService to run for some iterations GenericTestUtils.waitFor( @@ -480,8 +602,10 @@ void testSnapshotExclusiveSize() throws Exception { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); String snapshotName = snapshotEntry.getValue().getName(); + Long expected = expectedSize.getOrDefault(snapshotName, 0L); assertNotNull(expected); + System.out.println(snapshotName); assertEquals(expected, snapshotEntry.getValue().getExclusiveSize()); // Since for the test we are using RATIS/THREE assertEquals(expected * 3, snapshotEntry.getValue().getExclusiveReplicatedSize()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index eeb6f2c71ea..ab22b353bd7 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -118,6 +118,8 @@ void setup(@TempDir Path tempDir) throws Exception { conf.setTimeDuration(OZONE_OM_LEASE_HARD_LIMIT, EXPIRE_THRESHOLD_MS, TimeUnit.MILLISECONDS); conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.setQuietMode(false); OmTestManagers omTestManagers = new OmTestManagers(conf); @@ -164,8 +166,7 @@ public void testCleanupExpiredOpenKeys( // wait for submitted tasks to complete Thread.sleep(SERVICE_INTERVAL); final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount(); - final long oldrunCount = openKeyCleanupService.getRunCount(); - LOG.info("oldkeyCount={}, oldrunCount={}", oldkeyCount, oldrunCount); + LOG.info("oldkeyCount={}", oldkeyCount); final OMMetrics metrics = om.getMetrics(); long numKeyHSyncs = metrics.getNumKeyHSyncs(); @@ -187,9 +188,6 @@ public void testCleanupExpiredOpenKeys( GenericTestUtils.waitFor( () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= oldkeyCount + keyCount, SERVICE_INTERVAL, WAIT_TIME); - GenericTestUtils.waitFor( - () -> openKeyCleanupService.getRunCount() >= oldrunCount + 2, - SERVICE_INTERVAL, WAIT_TIME); waitForOpenKeyCleanup(false, BucketLayout.DEFAULT); waitForOpenKeyCleanup(hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED); @@ -330,8 +328,7 @@ public void testExcludeMPUOpenKeys( // wait for submitted tasks to complete Thread.sleep(SERVICE_INTERVAL); final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount(); - final long oldrunCount = openKeyCleanupService.getRunCount(); - LOG.info("oldMpuKeyCount={}, oldMpuRunCount={}", oldkeyCount, oldrunCount); + LOG.info("oldMpuKeyCount={}", oldkeyCount); final OMMetrics metrics = om.getMetrics(); long numKeyHSyncs = metrics.getNumKeyHSyncs(); @@ -351,13 +348,8 @@ public void testExcludeMPUOpenKeys( BucketLayout.FILE_SYSTEM_OPTIMIZED); openKeyCleanupService.resume(); - - GenericTestUtils.waitFor( - () -> openKeyCleanupService.getRunCount() >= oldrunCount + 2, - SERVICE_INTERVAL, WAIT_TIME); - - // wait for requests to complete - Thread.sleep(SERVICE_INTERVAL); + // wait for openKeyCleanupService to complete at least once + Thread.sleep(SERVICE_INTERVAL * 2); // No expired open keys fetched assertEquals(openKeyCleanupService.getSubmittedOpenKeyCount(), oldkeyCount); @@ -395,8 +387,7 @@ public void testCleanupExpiredOpenMPUPartKeys( // wait for submitted tasks to complete Thread.sleep(SERVICE_INTERVAL); final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount(); - final long oldrunCount = openKeyCleanupService.getRunCount(); - LOG.info("oldMpuKeyCount={}, oldMpuRunCount={}", oldkeyCount, oldrunCount); + LOG.info("oldMpuKeyCount={},", oldkeyCount); final OMMetrics metrics = om.getMetrics(); long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned(); @@ -421,9 +412,6 @@ public void testCleanupExpiredOpenMPUPartKeys( GenericTestUtils.waitFor( () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= oldkeyCount + partCount, SERVICE_INTERVAL, WAIT_TIME); - GenericTestUtils.waitFor( - () -> openKeyCleanupService.getRunCount() >= oldrunCount + 2, - SERVICE_INTERVAL, WAIT_TIME); // No expired MPU parts fetched 
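// Aside (illustrative only; the hunk above continues right below): these tests now wait on an
// observable effect, the submitted-open-key count, or sleep for a couple of service intervals,
// instead of polling the cleanup service's internal run counter. A self-contained version of that
// polling pattern, with hypothetical names rather than the real GenericTestUtils API:
final class PollingWaitSketch {
  static void waitFor(java.util.function.BooleanSupplier condition, long checkEveryMillis,
      long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {             // re-check until the condition holds
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("timed out waiting for condition");
      }
      Thread.sleep(checkEveryMillis);               // back off between checks
    }
  }
  // e.g. waitFor(() -> service.getSubmittedOpenKeyCount() >= oldKeyCount + keyCount, 100, 10_000);
}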
waitForOpenKeyCleanup(false, BucketLayout.DEFAULT); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java index 06b8beacb39..18625fe5c44 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java @@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -38,13 +39,13 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; import org.apache.hadoop.ozone.om.request.volume.OMQuotaRepairRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.volume.OMQuotaRepairResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; @@ -57,16 +58,16 @@ public class TestQuotaRepairTask extends TestOMKeyRequest { @Test public void testQuotaRepair() throws Exception { - when(ozoneManager.isRatisEnabled()).thenReturn(false); + when(ozoneManager.isRatisEnabled()).thenReturn(true); OzoneManagerProtocolProtos.OMResponse respMock = mock(OzoneManagerProtocolProtos.OMResponse.class); when(respMock.getSuccess()).thenReturn(true); - OzoneManagerProtocolServerSideTranslatorPB serverMock = mock(OzoneManagerProtocolServerSideTranslatorPB.class); + OzoneManagerRatisServer ratisServerMock = mock(OzoneManagerRatisServer.class); AtomicReference ref = new AtomicReference<>(); doAnswer(invocation -> { - ref.set(invocation.getArgument(1, OzoneManagerProtocolProtos.OMRequest.class)); + ref.set(invocation.getArgument(0, OzoneManagerProtocolProtos.OMRequest.class)); return respMock; - }).when(serverMock).submitRequest(any(), any()); - when(ozoneManager.getOmServerProtocol()).thenReturn(serverMock); + }).when(ratisServerMock).submitRequest(any(), any(), anyLong()); + when(ozoneManager.getOmRatisServer()).thenReturn(ratisServerMock); OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, BucketLayout.OBJECT_STORE); @@ -135,16 +136,16 @@ public void testQuotaRepair() throws Exception { @Test public void testQuotaRepairForOldVersionVolumeBucket() throws Exception { - when(ozoneManager.isRatisEnabled()).thenReturn(false); + when(ozoneManager.isRatisEnabled()).thenReturn(true); OzoneManagerProtocolProtos.OMResponse respMock = mock(OzoneManagerProtocolProtos.OMResponse.class); when(respMock.getSuccess()).thenReturn(true); - OzoneManagerProtocolServerSideTranslatorPB serverMock = mock(OzoneManagerProtocolServerSideTranslatorPB.class); + OzoneManagerRatisServer ratisServerMock = mock(OzoneManagerRatisServer.class); AtomicReference ref = new 
AtomicReference<>(); doAnswer(invocation -> { - ref.set(invocation.getArgument(1, OzoneManagerProtocolProtos.OMRequest.class)); + ref.set(invocation.getArgument(0, OzoneManagerProtocolProtos.OMRequest.class)); return respMock; - }).when(serverMock).submitRequest(any(), any()); - when(ozoneManager.getOmServerProtocol()).thenReturn(serverMock); + }).when(ratisServerMock).submitRequest(any(), any(), anyLong()); + when(ozoneManager.getOmRatisServer()).thenReturn(ratisServerMock); // add volume with -2 value OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder().setCreationTime(Time.now()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java index 3948f4fab80..e04891da83a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java @@ -20,7 +20,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshotManager; @@ -56,25 +57,26 @@ public class TestSnapshotDeletingService { private SnapshotChainManager chainManager; @Mock private OmMetadataManagerImpl omMetadataManager; - @Mock - private ScmBlockLocationProtocol scmClient; private final OzoneConfiguration conf = new OzoneConfiguration();; private final long sdsRunInterval = Duration.ofMillis(1000).toMillis(); private final long sdsServiceTimeout = Duration.ofSeconds(10).toMillis(); - private static Stream testCasesForIgnoreSnapshotGc() { - SnapshotInfo filteredSnapshot = SnapshotInfo.newBuilder().setSstFiltered(true).setName("snap1").build(); - SnapshotInfo unFilteredSnapshot = SnapshotInfo.newBuilder().setSstFiltered(false).setName("snap1").build(); + private static Stream testCasesForIgnoreSnapshotGc() throws IOException { + SnapshotInfo flushedSnapshot = SnapshotInfo.newBuilder().setSstFiltered(true) + .setLastTransactionInfo(TransactionInfo.valueOf(1, 1).toByteString()) + .setName("snap1").build(); + SnapshotInfo unFlushedSnapshot = SnapshotInfo.newBuilder().setSstFiltered(false).setName("snap1") + .setLastTransactionInfo(TransactionInfo.valueOf(0, 0).toByteString()).build(); return Stream.of( - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true)); + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(unFlushedSnapshot, 
SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true)); } @ParameterizedTest @@ -87,9 +89,15 @@ public void testProcessSnapshotLogicInSDS(SnapshotInfo snapshotInfo, Mockito.when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); Mockito.when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); Mockito.when(ozoneManager.getConfiguration()).thenReturn(conf); + if (status == SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED) { + Table transactionInfoTable = Mockito.mock(Table.class); + Mockito.when(omMetadataManager.getTransactionInfoTable()).thenReturn(transactionInfoTable); + Mockito.when(transactionInfoTable.getSkipCache(Mockito.anyString())) + .thenReturn(TransactionInfo.valueOf(1, 1)); + } SnapshotDeletingService snapshotDeletingService = - new SnapshotDeletingService(sdsRunInterval, sdsServiceTimeout, ozoneManager, scmClient); + new SnapshotDeletingService(sdsRunInterval, sdsServiceTimeout, ozoneManager); snapshotInfo.setSnapshotStatus(status); assertEquals(expectedOutcome, snapshotDeletingService.shouldIgnoreSnapshot(snapshotInfo)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index c5ae809718e..f49bfc33976 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import com.google.common.collect.ImmutableMap; +import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -38,6 +39,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -168,6 +170,7 @@ public void testAddSnapshot() throws Exception { } assertEquals(snapshotID3, chainManager.getLatestGlobalSnapshotId()); + assertEquals(snapshotID1, chainManager.getOldestGlobalSnapshotId()); assertEquals(snapshotID3, chainManager.getLatestPathSnapshotId( String.join("/", "vol1", "bucket1"))); @@ -285,6 +288,7 @@ public void testChainFromLoadFromTable(boolean increasingTIme) assertFalse(chainManager.isSnapshotChainCorrupted()); // check if snapshots loaded correctly from snapshotInfoTable assertEquals(snapshotID2, chainManager.getLatestGlobalSnapshotId()); + assertEquals(snapshotID1, chainManager.getOldestGlobalSnapshotId()); assertEquals(snapshotID2, chainManager.nextGlobalSnapshot(snapshotID1)); assertEquals(snapshotID1, chainManager.previousPathSnapshot(String .join("/", "vol1", "bucket1"), snapshotID2)); @@ -305,6 +309,34 @@ public void testChainFromLoadFromTable(boolean increasingTIme) () -> chainManager.nextGlobalSnapshot(snapshotID1)); } + @ParameterizedTest + @ValueSource(ints = {0, 1, 2, 5, 10}) + public void 
testSnapshotChainIterator(int numberOfSnapshots) throws IOException { + Table snapshotInfo = omMetadataManager.getSnapshotInfoTable(); + List snapshotInfoList = new ArrayList<>(); + + UUID prevSnapshotID = null; + long time = System.currentTimeMillis(); + for (int i = 0; i < numberOfSnapshots; i++) { + UUID snapshotID = UUID.randomUUID(); + SnapshotInfo snapInfo = createSnapshotInfo(snapshotID, prevSnapshotID, + prevSnapshotID, time++); + snapshotInfo.put(snapshotID.toString(), snapInfo); + prevSnapshotID = snapshotID; + snapshotInfoList.add(snapInfo); + } + chainManager = new SnapshotChainManager(omMetadataManager); + assertFalse(chainManager.isSnapshotChainCorrupted()); + List reverseChain = Lists.newArrayList(chainManager.iterator(true)); + Collections.reverse(reverseChain); + List forwardChain = Lists.newArrayList(chainManager.iterator(false)); + List expectedChain = snapshotInfoList.stream().map(SnapshotInfo::getSnapshotId).collect(Collectors.toList()); + assertEquals(expectedChain, reverseChain); + assertEquals(expectedChain, forwardChain); + assertEquals(forwardChain, reverseChain); + + } + private static Stream invalidSnapshotChain() { List nodes = IntStream.range(0, 5) .mapToObj(i -> UUID.randomUUID()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index d07372c4fc6..037e54d0008 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -69,6 +69,7 @@ import org.apache.ratis.util.TimeDuration; import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -82,6 +83,7 @@ import org.mockito.Mock; import org.mockito.MockedConstruction; import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; @@ -92,7 +94,6 @@ import org.rocksdb.RocksIterator; import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -364,7 +365,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { omSnapshotManager = mock(OmSnapshotManager.class); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - when(omSnapshotManager.isSnapshotStatus(any(), any())).thenReturn(true); SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10, omMetrics, 0); when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString())) @@ -412,7 +412,7 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { } @ParameterizedTest - @ValueSource(ints = {1, 2, 5, 10, 100, 1000, 10000}) + @ValueSource(ints = {0, 1, 2, 5, 10, 100, 1000, 10000}) public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); @@ -430,7 +430,7 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), eq(diffDir)) - ).thenReturn(Lists.newArrayList(randomStrings)); + 
).thenReturn(Optional.of(Lists.newArrayList(randomStrings))); ReferenceCounted rcFromSnapshot = omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); @@ -442,14 +442,20 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap2); when(jobTableIterator.isValid()).thenReturn(false); - Set deltaFiles = snapshotDiffManager.getDeltaFiles( - fromSnapshot, - toSnapshot, - Arrays.asList("cf1", "cf2"), fromSnapshotInfo, - toSnapshotInfo, false, - Collections.emptyMap(), diffDir); - assertEquals(randomStrings, deltaFiles); - + try (MockedStatic mockedRdbUtil = Mockito.mockStatic(RdbUtil.class, Mockito.CALLS_REAL_METHODS); + MockedStatic mockedRocksDiffUtils = Mockito.mockStatic(RocksDiffUtils.class, + Mockito.CALLS_REAL_METHODS)) { + mockedRdbUtil.when(() -> RdbUtil.getSSTFilesForComparison(any(), any())) + .thenReturn(Collections.singleton(RandomStringUtils.randomAlphabetic(10))); + mockedRocksDiffUtils.when(() -> RocksDiffUtils.filterRelevantSstFiles(any(), any())).thenAnswer(i -> null); + Set deltaFiles = snapshotDiffManager.getDeltaFiles( + fromSnapshot, + toSnapshot, + Arrays.asList("cf1", "cf2"), fromSnapshotInfo, + toSnapshotInfo, false, + Collections.emptyMap(), diffDir); + assertEquals(randomStrings, deltaFiles); + } rcFromSnapshot.close(); rcToSnapshot.close(); } @@ -477,7 +483,8 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, }); mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap())) + RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap(), anyMap(), any(ManagedRocksDB.class), + any(ManagedRocksDB.class))) .thenAnswer((Answer) invocationOnMock -> { invocationOnMock.getArgument(0, Set.class).stream() .findAny().ifPresent(val -> { @@ -498,7 +505,7 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), anyString())) - .thenReturn(Collections.emptyList()); + .thenReturn(Optional.ofNullable(Collections.emptyList())); } ReferenceCounted rcFromSnapshot = @@ -544,7 +551,8 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) }); mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap())) + RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap(), anyMap(), any(ManagedRocksDB.class), + any(ManagedRocksDB.class))) .thenAnswer((Answer) invocationOnMock -> { invocationOnMock.getArgument(0, Set.class).stream() .findAny().ifPresent(val -> { @@ -561,7 +569,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); - doThrow(new FileNotFoundException("File not found exception.")) + doThrow(new RuntimeException("File not found exception.")) .when(differ) .getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), @@ -1512,6 +1520,27 @@ private void setupMocksForRunningASnapDiff( when(bucketInfoTable.get(bucketKey)).thenReturn(bucketInfo); } + @Test + public void testGetDeltaFilesWithFullDiff() throws IOException { + SnapshotDiffManager spy = spy(snapshotDiffManager); + OmSnapshot fromSnapshot = getMockedOmSnapshot(UUID.randomUUID()); + OmSnapshot toSnapshot = getMockedOmSnapshot(UUID.randomUUID()); + Mockito.doAnswer(invocation -> { + OmSnapshot snapshot = 
invocation.getArgument(0); + if (snapshot == fromSnapshot) { + return Sets.newHashSet("1", "2", "3"); + } + if (snapshot == toSnapshot) { + return Sets.newHashSet("3", "4", "5"); + } + return Sets.newHashSet("6", "7", "8"); + }).when(spy).getSSTFileListForSnapshot(Mockito.any(OmSnapshot.class), + Mockito.anyList()); + Set deltaFiles = spy.getDeltaFiles(fromSnapshot, toSnapshot, Collections.emptyList(), snapshotInfo, + snapshotInfo, true, Collections.emptyMap(), null); + Assertions.assertEquals(Sets.newHashSet("1", "2", "3", "4", "5"), deltaFiles); + } + @Test public void testGetSnapshotDiffReportHappyCase() throws Exception { SnapshotInfo fromSnapInfo = snapshotInfo; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java index dc00433e179..29e0115861f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java @@ -19,12 +19,18 @@ package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; import org.apache.hadoop.util.Time; +import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -35,6 +41,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -114,4 +121,44 @@ public void testSnapshotSSTFilteredFlag() throws Exception { snapshotInfo.put(EXPECTED_SNAPSHOT_KEY, info); assertTrue(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).isSstFiltered()); } + + @Test + public void testLastTransactionInfo() throws Exception { + Table snapshotInfo = + omMetadataManager.getSnapshotInfoTable(); + SnapshotInfo info = createSnapshotInfo(); + snapshotInfo.put(EXPECTED_SNAPSHOT_KEY, info); + assertNull(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).getLastTransactionInfo()); + // checking if true value is returned when snapshot is null. + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, (SnapshotInfo)null)); + omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(0, 0)); + // Checking if changes have been flushed when lastTransactionInfo is null + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, info)); + TermIndex termIndex = TermIndex.valueOf(1, 1); + info.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + // Checking if changes to snapshot object has been updated but not updated on cache or disk. 
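// Aside (illustrative only; the assertions of this hunk continue right below): a compact model of
// the (term, transactionIndex) comparison these checks exercise. A snapshot's changes count as
// flushed once the TransactionInfo persisted for the OM has caught up with the snapshot's
// lastTransactionInfo, and a snapshot with no lastTransactionInfo is treated as flushed. This is a
// hypothetical helper, not the OmSnapshotManager implementation.
final class FlushedCheckSketch {
  static boolean flushed(long omTerm, long omIndex, Long snapTerm, Long snapIndex) {
    if (snapTerm == null || snapIndex == null) {
      return true;                                  // nothing recorded for the snapshot yet
    }
    if (omTerm != snapTerm) {
      return omTerm > snapTerm;                     // a later term always wins
    }
    return omIndex >= snapIndex;                    // same term: compare transaction indexes
  }
  // e.g. flushed(0, 0, 1L, 1L) == false; flushed(1, 1, 1L, 1L) and flushed(1, 2, 1L, 1L) == true
}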
+ assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + snapshotInfo.addCacheEntry(new CacheKey<>(EXPECTED_SNAPSHOT_KEY), CacheValue.get(termIndex.getIndex(), info)); + + assertEquals(snapshotInfo.get(EXPECTED_SNAPSHOT_KEY).getLastTransactionInfo(), info.getLastTransactionInfo()); + + // Checking if changes have not been flushed when snapshot last transaction info is behind OmTransactionTable value. + assertFalse(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + omMetadataManager.getTransactionInfoTable().addCacheEntry(new CacheKey<>(OzoneConsts.TRANSACTION_INFO_KEY), + CacheValue.get(termIndex.getIndex(), TransactionInfo.valueOf(1, 1))); + assertFalse(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + + // Checking changes are flushed when transaction is equal. + omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, + TransactionInfo.valueOf(1, 1)); + + + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + // Checking changes are flushed when transactionIndex is greater . + omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(1, 2)); + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + // Checking changes are flushed when both term & transactionIndex is greater. + omMetadataManager.getTransactionInfoTable().put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(2, 2)); + assertTrue(OmSnapshotManager.areSnapshotChangesFlushedToDB(omMetadataManager, EXPECTED_SNAPSHOT_KEY)); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java new file mode 100644 index 00000000000..b037b68fd72 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createOmKeyInfo; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Base class to test snapshot functionalities. 
+ */ +public class TestSnapshotRequestAndResponse { + @TempDir + private File testDir; + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + private BatchOperation batchOperation; + private OmSnapshotManager omSnapshotManager; + + private String volumeName; + private String bucketName; + private boolean isAdmin; + + public BatchOperation getBatchOperation() { + return batchOperation; + } + + public String getBucketName() { + return bucketName; + } + + public boolean isAdmin() { + return isAdmin; + } + + public OmMetadataManagerImpl getOmMetadataManager() { + return omMetadataManager; + } + + public OMMetrics getOmMetrics() { + return omMetrics; + } + + public OmSnapshotManager getOmSnapshotManager() { + return omSnapshotManager; + } + + public OzoneManager getOzoneManager() { + return ozoneManager; + } + + public File getTestDir() { + return testDir; + } + + public String getVolumeName() { + return volumeName; + } + + protected TestSnapshotRequestAndResponse() { + this.isAdmin = false; + } + + protected TestSnapshotRequestAndResponse(boolean isAdmin) { + this.isAdmin = isAdmin; + } + + @BeforeEach + public void baseSetup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + testDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + testDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + when(ozoneManager.resolveBucketLink(any(Pair.class), any(OMClientRequest.class))) + .thenAnswer(i -> new ResolvedBucket(i.getArgument(0), + i.getArgument(0), "dummyBucketOwner", BucketLayout.FILE_SYSTEM_OPTIMIZED)); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(isAdmin); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + IAccessAuthorizer accessAuthorizer = mock(IAccessAuthorizer.class); + when(ozoneManager.getAccessAuthorizer()).thenReturn(accessAuthorizer); + when(accessAuthorizer.isNative()).thenReturn(false); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + omSnapshotManager = new OmSnapshotManager(ozoneManager); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + } + + @AfterEach + public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + if (batchOperation != null) { + batchOperation.close(); + } + } + + protected Path 
createSnapshotCheckpoint(String volume, String bucket, String snapshotName) throws Exception { + OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils + .createSnapshotRequest(volume, bucket, snapshotName); + // Pre-Execute OMSnapshotCreateRequest. + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); + + // validateAndUpdateCache OMSnapshotCreateResponse. + OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + // Add to batch and commit to DB. + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } + + String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); + SnapshotInfo snapshotInfo = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNotNull(snapshotInfo); + + RDBStore store = (RDBStore) omMetadataManager.getStore(); + String checkpointPrefix = store.getDbLocation().getName(); + Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), + checkpointPrefix + snapshotInfo.getCheckpointDir()); + // Check the DB is still there + assertTrue(Files.exists(snapshotDirPath)); + return snapshotDirPath; + } + + protected List>> getDeletedKeys(String volume, String bucket, + int startRange, int endRange, + int numberOfKeys, + int minVersion) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> Pair.of(omMetadataManager.getOzoneDeletePathKey(i, + omMetadataManager.getOzoneKey(volume, bucket, "key" + String.format("%010d", i))), + IntStream.range(0, numberOfKeys).boxed().map(cnt -> createOmKeyInfo(volume, bucket, "key" + i, + ReplicationConfig.getDefault(ozoneManager.getConfiguration()), + new OmKeyLocationInfoGroup(minVersion + cnt, new ArrayList<>(), false)) + .setCreationTime(0).setModificationTime(0).build()) + .collect(Collectors.toList()))) + .collect(Collectors.toList()); + } + + protected List> getRenameKeys(String volume, String bucket, + int startRange, int endRange, + String renameKeyPrefix) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> { + try { + return Pair.of(omMetadataManager.getRenameKey(volume, bucket, i), + omMetadataManager.getOzoneKeyFSO(volume, bucket, renameKeyPrefix + i)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toList()); + } + + protected List>> getDeletedDirKeys(String volume, String bucket, + int startRange, int endRange, int numberOfKeys) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> { + try { + return Pair.of(omMetadataManager.getOzoneDeletePathKey(i, + omMetadataManager.getOzoneKeyFSO(volume, bucket, "1/key" + i)), + IntStream.range(0, numberOfKeys).boxed().map(cnt -> createOmKeyInfo(volume, bucket, "key" + i, + ReplicationConfig.getDefault(ozoneManager.getConfiguration())).build()) + .collect(Collectors.toList())); + } catch (IOException e) { + throw new RuntimeException(e); + } + }) + .collect(Collectors.toList()); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java new file mode 100644 index 00000000000..996cab08277 --- /dev/null +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java @@ -0,0 +1,175 @@ +package org.apache.hadoop.ozone.protocolPB; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.ListKeysLightResult; +import org.apache.hadoop.ozone.om.helpers.ListKeysResult; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; + +/** + * Test class to test out OzoneManagerRequestHandler. 
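// Aside (illustrative only; the class declaration follows right below): the tests in this file pin
// down how list responses are capped. The effective page size is the smallest of the server-side
// limit (OZONE_OM_SERVER_LIST_MAX_SIZE, set to 10 here), the client-requested count, and the number
// of available entries, floored at zero, and the response is flagged truncated when entries were
// cut off. A hypothetical helper expressing that rule:
final class ListCapSketch {
  static int effectivePageSize(int serverLimit, int requestedCount, int available) {
    return Math.max(Math.min(Math.min(serverLimit, requestedCount), available), 0);
  }
  static boolean truncated(int returned, int available) {
    return returned < available;                    // some entries did not fit in this page
  }
  // e.g. effectivePageSize(10, Integer.MAX_VALUE, 50) == 10, and truncated(10, 50) == true
}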
+ */ +public class TestOzoneManagerRequestHandler { + + + private OzoneManagerRequestHandler getRequestHandler(int limitListKeySize) { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, limitListKeySize); + OzoneManager ozoneManager = Mockito.mock(OzoneManager.class); + Mockito.when(ozoneManager.getConfiguration()).thenReturn(conf); + return new OzoneManagerRequestHandler(ozoneManager); + } + + private OmKeyInfo getMockedOmKeyInfo() { + OmKeyInfo keyInfo = Mockito.mock(OmKeyInfo.class); + OzoneManagerProtocolProtos.KeyInfo info = + OzoneManagerProtocolProtos.KeyInfo.newBuilder().setBucketName("bucket").setKeyName("key").setVolumeName( + "volume").setDataSize(0).setType(HddsProtos.ReplicationType.RATIS).setCreationTime(0) + .setModificationTime(0).build(); + Mockito.when(keyInfo.getProtobuf(Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(info); + Mockito.when(keyInfo.getProtobuf(Mockito.anyInt())).thenReturn(info); + return keyInfo; + } + + private BasicOmKeyInfo getMockedBasicOmKeyInfo() { + BasicOmKeyInfo keyInfo = Mockito.mock(BasicOmKeyInfo.class); + Mockito.when(keyInfo.getProtobuf()).thenReturn( + OzoneManagerProtocolProtos.BasicKeyInfo.newBuilder().setKeyName("key").setDataSize(0) + .setType(HddsProtos.ReplicationType.RATIS).setCreationTime(0).setModificationTime(0) + .build()); + return keyInfo; + } + + private OzoneFileStatus getMockedOzoneFileStatus() { + return new OzoneFileStatus(getMockedOmKeyInfo(), 256, false); + } + + private void mockOmRequest(OzoneManagerProtocolProtos.OMRequest request, + OzoneManagerProtocolProtos.Type cmdType, + int requestSize) { + Mockito.when(request.getTraceID()).thenReturn("traceId"); + Mockito.when(request.getCmdType()).thenReturn(cmdType); + switch (cmdType) { + case ListKeysLight: + case ListKeys: + Mockito.when(request.getListKeysRequest()).thenReturn(OzoneManagerProtocolProtos.ListKeysRequest.newBuilder() + .setCount(requestSize).setBucketName("bucket").setVolumeName("volume").setPrefix("").setStartKey("") + .build()); + break; + case ListStatus: + Mockito.when(request.getListStatusRequest()).thenReturn(OzoneManagerProtocolProtos.ListStatusRequest.newBuilder() + .setNumEntries(requestSize).setKeyArgs(OzoneManagerProtocolProtos.KeyArgs.newBuilder().setBucketName( + "bucket").setVolumeName("volume").setKeyName("keyName") + .setLatestVersionLocation(true).setHeadOp(true)).setRecursive(true).setStartKey("") + .build()); + break; + default: + break; + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 9, 10, 11, 50}) + public void testListKeysResponseSize(int resultSize) throws IOException { + List keyInfos = IntStream.range(0, resultSize).mapToObj(i -> getMockedOmKeyInfo()).collect( + Collectors.toList()); + OzoneManagerRequestHandler requestHandler = getRequestHandler(10); + OzoneManager ozoneManager = requestHandler.getOzoneManager(); + Mockito.when(ozoneManager.listKeys(Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), + Mockito.anyString(), Mockito.anyInt())).thenAnswer(i -> { + int maxSize = Math.max(Math.min(resultSize, i.getArgument(4)), 0); + return new ListKeysResult(keyInfos.isEmpty() ? 
keyInfos : keyInfos.subList(0, maxSize), + maxSize < resultSize); + }); + OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class); + for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) { + mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListKeys, requestSize); + OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request); + int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0); + Assertions.assertEquals(expectedSize, omResponse.getListKeysResponse().getKeyInfoList().size()); + Assertions.assertEquals(expectedSize < resultSize, omResponse.getListKeysResponse().getIsTruncated()); + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 9, 10, 11, 50}) + public void testListLightKeysResponseSize(int resultSize) throws IOException { + List keyInfos = IntStream.range(0, resultSize).mapToObj(i -> getMockedBasicOmKeyInfo()).collect( + Collectors.toList()); + OzoneManagerRequestHandler requestHandler = getRequestHandler(10); + OzoneManager ozoneManager = requestHandler.getOzoneManager(); + Mockito.when(ozoneManager.listKeysLight(Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), + Mockito.anyString(), Mockito.anyInt())).thenAnswer(i -> { + int maxSize = Math.max(Math.min(resultSize, i.getArgument(4)), 0); + return new ListKeysLightResult(keyInfos.isEmpty() ? keyInfos : keyInfos.subList(0, maxSize), + maxSize < resultSize); + }); + OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class); + for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) { + mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListKeysLight, requestSize); + OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request); + int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0); + Assertions.assertEquals(expectedSize, omResponse.getListKeysLightResponse().getBasicKeyInfoList().size()); + Assertions.assertEquals(expectedSize < resultSize, + omResponse.getListKeysLightResponse().getIsTruncated()); + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 9, 10, 11, 50}) + public void testListStatusResponseSize(int resultSize) throws IOException { + List statusList = IntStream.range(0, resultSize).mapToObj(i -> getMockedOzoneFileStatus()) + .collect(Collectors.toList()); + OzoneManagerRequestHandler requestHandler = getRequestHandler(10); + OzoneManager ozoneManager = requestHandler.getOzoneManager(); + Mockito.when(ozoneManager.listStatus(Mockito.any(OmKeyArgs.class), Mockito.anyBoolean(), Mockito.anyString(), + Mockito.anyLong(), Mockito.anyBoolean())).thenAnswer(i -> { + long maxSize = i.getArgument(3); + maxSize = Math.max(Math.min(resultSize, maxSize), 0); + return statusList.isEmpty() ? 
statusList : statusList.subList(0, (int) maxSize); + }); + OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class); + for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) { + mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListStatus, requestSize); + OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request); + int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0); + Assertions.assertEquals(expectedSize, omResponse.getListStatusResponse().getStatusesList().size()); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/SecretKeyTestClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/SecretKeyTestClient.java new file mode 100644 index 00000000000..32ef5988e10 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/SecretKeyTestClient.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.security; + +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; + +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import java.security.NoSuchAlgorithmException; +import java.time.Duration; +import java.time.Instant; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +/** + * Test implementation of {@link SecretKeyClient}. + */ +public class SecretKeyTestClient implements SecretKeyClient { + private final Map keysMap = new HashMap<>(); + private ManagedSecretKey current; + + public SecretKeyTestClient() { + rotate(); + } + + public void rotate() { + this.current = generateKey(); + keysMap.put(current.getId(), current); + } + + @Override + public ManagedSecretKey getCurrentSecretKey() { + return current; + } + + @Override + public ManagedSecretKey getSecretKey(UUID id) { + return keysMap.get(id); + } + + private ManagedSecretKey generateKey() { + KeyGenerator keyGen = null; + try { + keyGen = KeyGenerator.getInstance("HmacSHA256"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("Should never happen", e); + } + SecretKey secretKey = keyGen.generateKey(); + return new ManagedSecretKey( + UUID.randomUUID(), + Instant.now(), + Instant.now().plus(Duration.ofHours(1)), + secretKey + ); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java index d94f59b8fb8..a9239b5639a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java @@ -24,7 +24,6 @@ import java.security.KeyPair; import java.security.PrivateKey; import java.security.PublicKey; -import java.security.Signature; import java.security.cert.CertPath; import java.security.cert.CertificateFactory; import java.security.cert.X509Certificate; @@ -35,6 +34,8 @@ import com.google.common.collect.ImmutableList; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.io.Text; @@ -50,13 +51,14 @@ import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.upgrade.LayoutFeature; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.SecretManager; import 
org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Time; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3AUTHINFO; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -64,11 +66,14 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import org.slf4j.event.Level; +import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.protocol.RaftPeerId; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -87,6 +92,7 @@ public class TestOzoneDelegationTokenSecretManager { private OzoneDelegationTokenSecretManager secretManager; private SecurityConfig securityConfig; private OMCertificateClient certificateClient; + private SecretKeyClient secretKeyClient; private long expiryTime; private Text serviceRpcAdd; private OzoneConfiguration conf; @@ -102,6 +108,7 @@ public void setUp() throws Exception { securityConfig = new SecurityConfig(conf); certificateClient = setupCertificateClient(); certificateClient.init(); + secretKeyClient = new SecretKeyTestClient(); expiryTime = Time.monotonicNow() + 60 * 60 * 24; serviceRpcAdd = new Text("localhost"); final Map s3Secrets = new HashMap<>(); @@ -112,6 +119,9 @@ public void setUp() throws Exception { om = mock(OzoneManager.class); OMMetadataManager metadataManager = new OmMetadataManagerImpl(conf, om); when(om.getMetadataManager()).thenReturn(metadataManager); + OMLayoutVersionManager versionManager = mock(OMLayoutVersionManager.class); + when(versionManager.isAllowed(any(LayoutFeature.class))).thenReturn(true); + when(om.getVersionManager()).thenReturn(versionManager); s3SecretManager = new S3SecretLockedManager( new S3SecretManagerImpl(new S3SecretStoreMap(s3Secrets), mock(S3SecretCache.class)), @@ -121,14 +131,6 @@ public void setUp() throws Exception { private OzoneConfiguration createNewTestPath() throws IOException { OzoneConfiguration config = new OzoneConfiguration(); - // When ratis is enabled, tokens are not updated to the store directly by - // OzoneDelegationTokenSecretManager. Tokens are updated via Ratis - // through the DoubleBuffer. Hence, to test - // OzoneDelegationTokenSecretManager, we should disable OM Ratis. - // TODO: Once HA and non-HA code paths are merged in - // OzoneDelegationTokenSecretManager, this test should be updated to - // test both ratis enabled and disabled case. 
- config.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); File newFolder = folder.toFile(); if (!newFolder.exists()) { assertTrue(newFolder.mkdirs()); @@ -246,6 +248,7 @@ private void testRenewTokenSuccessHelper(boolean restartSecretManager) Token token = secretManager.createToken(TEST_USER, TEST_USER, TEST_USER); + addToTokenStore(token); Thread.sleep(10 * 5); if (restartSecretManager) { @@ -253,6 +256,7 @@ private void testRenewTokenSuccessHelper(boolean restartSecretManager) } long renewalTime = secretManager.renewToken(token, TEST_USER.toString()); + addToTokenStore(token); assertThat(renewalTime).isGreaterThan(0); } @@ -276,6 +280,7 @@ public void testRenewTokenFailure() throws Exception { secretManager.start(certificateClient); Token token = secretManager.createToken(TEST_USER, TEST_USER, TEST_USER); + addToTokenStore(token); AccessControlException exception = assertThrows(AccessControlException.class, () -> secretManager.renewToken(token, "rougeUser")); @@ -343,6 +348,7 @@ public void testCancelTokenSuccess() throws Exception { secretManager.start(certificateClient); Token token = secretManager.createToken(TEST_USER, TEST_USER, TEST_USER); + addToTokenStore(token); secretManager.cancelToken(token, TEST_USER.toString()); } @@ -368,12 +374,28 @@ public void testVerifySignatureSuccess() throws Exception { expiryTime, TOKEN_REMOVER_SCAN_INTERVAL); secretManager.start(certificateClient); OzoneTokenIdentifier id = new OzoneTokenIdentifier(); + id.setMaxDate(Time.now() + 60 * 60 * 24); + id.setOwner(new Text("test")); + id.setSecretKeyId(secretKeyClient.getCurrentSecretKey().getId().toString()); + assertTrue(secretManager.verifySignature(id, secretKeyClient.getCurrentSecretKey().sign(id.getBytes()))); + } + + @Test + public void testVerifyAsymmetricSignatureSuccess() throws Exception { + GenericTestUtils.setLogLevel(OzoneDelegationTokenSecretManager.LOG, Level.DEBUG); + GenericTestUtils.LogCapturer logCapturer = + GenericTestUtils.LogCapturer.captureLogs(OzoneDelegationTokenSecretManager.LOG); + secretManager = createSecretManager(conf, TOKEN_MAX_LIFETIME, + expiryTime, TOKEN_REMOVER_SCAN_INTERVAL); + secretManager.start(certificateClient); + OzoneTokenIdentifier id = new OzoneTokenIdentifier(); id.setOmCertSerialId(certificateClient.getCertificate() .getSerialNumber().toString()); id.setMaxDate(Time.now() + 60 * 60 * 24); id.setOwner(new Text("test")); - assertTrue(secretManager.verifySignature(id, - certificateClient.signData(id.getBytes()))); + assertTrue(secretManager.verifySignature(id, certificateClient.signData(id.getBytes()))); + assertTrue(logCapturer.getOutput().contains("Verify an asymmetric key signed Token")); + logCapturer.stopCapturing(); } @Test @@ -461,12 +483,9 @@ public void testValidateS3AUTHINFOFailure() throws Exception { * Validate hash using public key of KeyPair. 
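// Aside (illustrative only; the hunk above resumes right below): validateHash now verifies the
// token hash with the symmetric secret key recorded in the identifier rather than an RSA Signature.
// A plain javax.crypto sketch of that sign/verify round trip (ManagedSecretKey wraps something
// along these lines; this is not its actual implementation):
final class HmacRoundTripSketch {
  public static void main(String[] args) throws Exception {
    javax.crypto.SecretKey key = javax.crypto.KeyGenerator.getInstance("HmacSHA256").generateKey();
    byte[] payload = "token-identifier-bytes".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    javax.crypto.Mac mac = javax.crypto.Mac.getInstance("HmacSHA256");
    mac.init(key);
    byte[] signature = mac.doFinal(payload);        // "sign": MAC over the identifier bytes
    mac.init(key);                                  // recompute with the same key to "verify"
    boolean valid = java.security.MessageDigest.isEqual(signature, mac.doFinal(payload));
    System.out.println(valid);                      // true
  }
}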
*/ private void validateHash(byte[] hash, byte[] identifier) throws Exception { - Signature rsaSignature = - Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - rsaSignature.initVerify(certificateClient.getPublicKey()); - rsaSignature.update(identifier); - assertTrue(rsaSignature.verify(hash)); + OzoneTokenIdentifier ozoneTokenIdentifier = OzoneTokenIdentifier.readProtoBuf(identifier); + ManagedSecretKey verifyKey = secretKeyClient.getSecretKey(UUID.fromString(ozoneTokenIdentifier.getSecretKeyId())); + verifyKey.isValidSignature(identifier, hash); } /** @@ -485,6 +504,14 @@ private void validateHash(byte[] hash, byte[] identifier) throws Exception { .setS3SecretManager(s3SecretManager) .setCertificateClient(certificateClient) .setOmServiceId(OzoneConsts.OM_SERVICE_ID_DEFAULT) + .setSecretKeyClient(secretKeyClient) .build(); } + + private void addToTokenStore(Token token) throws IOException { + OzoneTokenIdentifier ozoneTokenIdentifier = OzoneTokenIdentifier. + readProtoBuf(token.getIdentifier()); + long renewDate = secretManager.updateToken(token, ozoneTokenIdentifier, expiryTime); + om.getMetadataManager().getDelegationTokenTable().put(ozoneTokenIdentifier, renewDate); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index c354864a529..ab773f6d718 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -169,8 +169,7 @@ private void createKey(String volume, .setReplicationConfig(StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setDataSize(0) - .setAcls(OzoneAclUtil.getAclList(testUgi.getUserName(), - testUgi.getGroupNames(), ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)) .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index c3ec7843a6f..a6e6f13ae34 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -344,8 +344,7 @@ private OzoneObjInfo createKey(String volume, String bucket, String keyName) HddsProtos.ReplicationFactor.ONE)) .setDataSize(0) // here we give test ugi full access - .setAcls(OzoneAclUtil.getAclList(testUgi.getUserName(), - testUgi.getGroupNames(), ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)) .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java index 7c1aad0723b..98e7ce7be85 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java @@ -142,11 +142,9 @@ private static void prepareTestKeys() throws IOException { 
UserGroupInformation.getCurrentUser().getShortUserName()) .setDataSize(0); if (k == 0) { - keyArgsBuilder.setAcls(OzoneAclUtil.getAclList( - testUgi.getUserName(), testUgi.getGroupNames(), ALL, ALL)); + keyArgsBuilder.setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)); } else { - keyArgsBuilder.setAcls(OzoneAclUtil.getAclList( - testUgi.getUserName(), testUgi.getGroupNames(), NONE, NONE)); + keyArgsBuilder.setAcls(OzoneAclUtil.getAclList(testUgi, NONE, NONE)); } OmKeyArgs keyArgs = keyArgsBuilder.build(); OpenKeySession keySession = writeClient.createFile(keyArgs, true, diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index 6132f9bc125..18839deaee5 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-common Apache Ozone FileSystem Common jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true @@ -112,35 +112,10 @@ - - org.apache.ozone - hdds-container-service - test - org.apache.ozone hdds-hadoop-dependency-test test - - org.apache.ozone - hdds-server-framework - test - - - org.apache.ozone - hdds-server-scm - test - - - org.apache.ozone - hdds-test-utils - test - - - org.apache.ozone - ozone-manager - test - diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java index acab6d168cc..9f7551aa8f0 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java @@ -46,4 +46,13 @@ public BasicOzFs(URI theUri, Configuration conf) public int getUriDefaultPort() { return -1; } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. 
+ */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index df8ece03486..689e340ff5d 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -29,6 +29,8 @@ import java.util.List; import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -42,7 +44,6 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; @@ -68,21 +69,23 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.security.token.Token; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; @@ -90,9 +93,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Basic Implementation of the OzoneFileSystem calls. *

    @@ -193,18 +193,24 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, OzoneClientFactory.getRpcClient(conf); } objectStore = ozoneClient.getObjectStore(); - this.volume = objectStore.getVolume(volumeStr); - this.bucket = volume.getBucket(bucketStr); - bucketReplicationConfig = this.bucket.getReplicationConfig(); - nextReplicationConfigRefreshTime = - clock.millis() + bucketRepConfigRefreshPeriodMS; + try { + this.volume = objectStore.getVolume(volumeStr); + this.bucket = volume.getBucket(bucketStr); + bucketReplicationConfig = this.bucket.getReplicationConfig(); + nextReplicationConfigRefreshTime = clock.millis() + bucketRepConfigRefreshPeriodMS; - // resolve the bucket layout in case of Link Bucket - BucketLayout resolvedBucketLayout = - OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, - new HashSet<>()); + // resolve the bucket layout in case of Link Bucket + BucketLayout resolvedBucketLayout = + OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, new HashSet<>()); - OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); + OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); + } catch (IOException | RuntimeException exception) { + // in case of exception, the adapter object will not be + // initialised making the client object unreachable, close the client + // to release resources in this case and rethrow. + ozoneClient.close(); + throw exception; + } this.configuredDnPort = conf.getInt( OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, @@ -432,15 +438,22 @@ public Iterator listKeys(String pathKey) throws IOException { @Override public List listStatus(String keyName, boolean recursive, String startKey, long numEntries, URI uri, - Path workingDir, String username) throws IOException { + Path workingDir, String username, boolean lite) throws IOException { try { incrementCounter(Statistic.OBJECTS_LIST, 1); - List statuses = bucket - .listStatus(keyName, recursive, startKey, numEntries); - List result = new ArrayList<>(); - for (OzoneFileStatus status : statuses) { - result.add(toFileStatusAdapter(status, username, uri, workingDir)); + if (lite) { + List statuses = bucket + .listStatusLight(keyName, recursive, startKey, numEntries); + for (OzoneFileStatusLight status : statuses) { + result.add(toFileStatusAdapter(status, username, uri, workingDir)); + } + } else { + List statuses = bucket + .listStatus(keyName, recursive, startKey, numEntries); + for (OzoneFileStatus status : statuses) { + result.add(toFileStatusAdapter(status, username, uri, workingDir)); + } } return result; } catch (OMException e) { @@ -545,6 +558,31 @@ private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status, ); } + private FileStatusAdapter toFileStatusAdapter(OzoneFileStatusLight status, + String owner, URI defaultUri, Path workingDir) { + BasicOmKeyInfo keyInfo = status.getKeyInfo(); + short replication = (short) keyInfo.getReplicationConfig() + .getRequiredNodes(); + return new FileStatusAdapter( + keyInfo.getDataSize(), + keyInfo.getReplicatedSize(), + new Path(OZONE_URI_DELIMITER + keyInfo.getKeyName()) + .makeQualified(defaultUri, workingDir), + status.isDirectory(), + replication, + status.getBlockSize(), + keyInfo.getModificationTime(), + keyInfo.getModificationTime(), + status.isDirectory() ? 
(short) 00777 : (short) 00666, + StringUtils.defaultIfEmpty(keyInfo.getOwnerName(), owner), + owner, + null, + getBlockLocations(null), + false, + OzoneClientUtils.isKeyErasureCode(keyInfo) + ); + } + /** * Helper method to get List of BlockLocation from OM Key info. * @param fileStatus Ozone key file status. @@ -581,16 +619,15 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) { omKeyLocationInfo.getPipeline().getNodes() .forEach(dn -> { hostList.add(dn.getHostName()); - int port = dn.getPort( - DatanodeDetails.Port.Name.STANDALONE).getValue(); + int port = dn.getStandalonePort().getValue(); if (port == 0) { port = configuredDnPort; } nameList.add(dn.getHostName() + ":" + port); }); - String[] hosts = hostList.toArray(new String[hostList.size()]); - String[] names = nameList.toArray(new String[nameList.size()]); + String[] hosts = hostList.toArray(new String[0]); + String[] names = nameList.toArray(new String[0]); BlockLocation blockLocation = new BlockLocation( names, hosts, offsetOfBlockInFile, omKeyLocationInfo.getLength()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index f25d9011475..ed8d99d67fa 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; +import com.google.common.base.Function; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; @@ -147,9 +148,6 @@ public void initialize(URI name, Configuration conf) throws IOException { OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD, OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES); - hsyncEnabled = conf.getBoolean( - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, - OZONE_FS_HSYNC_ENABLED_DEFAULT); setConf(conf); Preconditions.checkNotNull(name.getScheme(), "No scheme provided in %s", name); @@ -197,6 +195,8 @@ public void initialize(URI name, Configuration conf) throws IOException { LOG.trace("Ozone URI for ozfs initialization is {}", uri); ConfigurationSource source = getConfSource(); + this.hsyncEnabled = OzoneFSUtils.canEnableHsync(source, true); + LOG.debug("hsyncEnabled = {}", hsyncEnabled); this.adapter = createAdapter(source, bucketStr, volumeStr, omHost, omPort); @@ -685,28 +685,30 @@ public FileStatus[] listStatus(Path f) throws IOException { LinkedList statuses = new LinkedList<>(); List tmpStatusList; String startKey = ""; - + int entriesAdded; do { tmpStatusList = adapter.listStatus(pathToKey(f), false, startKey, numEntries, uri, - workingDir, getUsername()) + workingDir, getUsername(), true) .stream() .map(this::convertFileStatus) .collect(Collectors.toList()); - + entriesAdded = 0; if (!tmpStatusList.isEmpty()) { if (startKey.isEmpty() || !statuses.getLast().getPath().toString() .equals(tmpStatusList.get(0).getPath().toString())) { statuses.addAll(tmpStatusList); + entriesAdded += tmpStatusList.size(); } else { statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size())); + entriesAdded += tmpStatusList.size() - 1; } startKey = pathToKey(statuses.getLast().getPath()); } // listStatus returns entries numEntries in size if available. // Any lesser number of entries indicate that the required entries have // exhausted. 
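The try/catch added to the adapter constructor above follows a general close-on-failed-initialisation pattern: once OzoneClientFactory has handed out a client, the adapter is its only owner, so a failure while resolving the volume or bucket would otherwise leak the RPC resources. A minimal standalone illustration of that pattern follows; the class and field names are hypothetical, while the client calls are the ones the adapter itself uses.

    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;

    final class BucketAdapterSketch implements AutoCloseable {
      private final OzoneClient client;
      private final OzoneBucket bucket;

      BucketAdapterSketch(OzoneConfiguration conf, String volume, String bucketName)
          throws IOException {
        OzoneClient c = OzoneClientFactory.getRpcClient(conf);
        try {
          this.bucket = c.getObjectStore().getVolume(volume).getBucket(bucketName);
        } catch (IOException | RuntimeException e) {
          // the constructor will not complete, so nothing else will ever close the client
          c.close();
          throw e;
        }
        this.client = c;
      }

      OzoneBucket getBucket() {
        return bucket;
      }

      @Override
      public void close() throws IOException {
        client.close();
      }
    }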
- } while (tmpStatusList.size() == numEntries); + } while (entriesAdded > 0); return statuses.toArray(new FileStatus[0]); @@ -948,13 +950,15 @@ public RemoteIterator listFiles(Path f, boolean recursive) public RemoteIterator listLocatedStatus(Path f) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_LOCATED_STATUS); - return super.listLocatedStatus(f); + return new OzoneFileStatusIterator<>(f, + (stat) -> stat instanceof LocatedFileStatus ? (LocatedFileStatus) stat : new LocatedFileStatus(stat, null), + false); } @Override public RemoteIterator listStatusIterator(Path f) throws IOException { - return new OzoneFileStatusIterator<>(f); + return new OzoneFileStatusIterator<>(f, stat -> stat, true); } @Override @@ -987,7 +991,6 @@ public void setTimes(Path f, long mtime, long atime) throws IOException { String key = pathToKey(qualifiedPath); adapter.setTimes(key, mtime, atime); } - /** * A private class implementation for iterating list of file status. * @@ -1000,18 +1003,24 @@ private final class OzoneFileStatusIterator private Path p; private T curStat = null; private String startPath = ""; + private boolean lite; + private Function transformFunc; /** * Constructor to initialize OzoneFileStatusIterator. * Get the first batch of entry for iteration. * * @param p path to file/directory. + * @param transformFunc function to convert FileStatus into an expected type. + * @param lite if true it should look into fetching a lightweight keys from server. * @throws IOException */ - private OzoneFileStatusIterator(Path p) throws IOException { + private OzoneFileStatusIterator(Path p, Function transformFunc, boolean lite) throws IOException { this.p = p; + this.lite = lite; + this.transformFunc = transformFunc; // fetch the first batch of entries in the directory - thisListing = listFileStatus(p, startPath); + thisListing = listFileStatus(p, startPath, lite); if (thisListing != null && !thisListing.isEmpty()) { startPath = pathToKey( thisListing.get(thisListing.size() - 1).getPath()); @@ -1030,7 +1039,7 @@ public boolean hasNext() throws IOException { while (curStat == null && hasNextNoFilter()) { T next; FileStatus fileStat = thisListing.get(i++); - next = (T) (fileStat); + next = this.transformFunc.apply(fileStat); curStat = next; } return curStat != null; @@ -1048,10 +1057,9 @@ private boolean hasNextNoFilter() throws IOException { return false; } if (i >= thisListing.size()) { - if (startPath != null && (thisListing.size() == listingPageSize || - thisListing.size() == listingPageSize - 1)) { + if (startPath != null && (!thisListing.isEmpty())) { // current listing is exhausted & fetch a new listing - thisListing = listFileStatus(p, startPath); + thisListing = listFileStatus(p, startPath, lite); if (thisListing != null && !thisListing.isEmpty()) { startPath = pathToKey( thisListing.get(thisListing.size() - 1).getPath()); @@ -1086,10 +1094,11 @@ public T next() throws IOException { * * @param f * @param startPath + * @param lite if true return lightweight keys * @return list of file status. 
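The loop rewrite above (and the matching relaxation in hasNextNoFilter) changes the termination rule for paged listings: instead of inferring exhaustion from the page size, the loop stops once a page contributes no new entries, after dropping the first element of a page when it merely repeats the key the request started from. Below is a self-contained toy model of that loop, not Ozone code: the fake paged listing includes the start key in the next page, which is exactly the case the de-duplication branch anticipates.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedList;
    import java.util.List;

    public class ListingLoopDemo {
      private static final List<String> KEYS =
          Arrays.asList("a", "b", "c", "d", "e", "f", "g");

      // Fake paged listing: returns up to pageSize keys >= startKey (inclusive).
      static List<String> listPage(String startKey, int pageSize) {
        List<String> page = new ArrayList<>();
        for (String k : KEYS) {
          if (k.compareTo(startKey) >= 0 && page.size() < pageSize) {
            page.add(k);
          }
        }
        return page;
      }

      public static void main(String[] args) {
        LinkedList<String> all = new LinkedList<>();
        String startKey = "";
        int pageSize = 3;
        int entriesAdded;
        do {
          List<String> page = listPage(startKey, pageSize);
          entriesAdded = 0;
          if (!page.isEmpty()) {
            if (startKey.isEmpty() || !all.getLast().equals(page.get(0))) {
              all.addAll(page);
              entriesAdded = page.size();
            } else {
              // the first element repeats the last key of the previous page
              all.addAll(page.subList(1, page.size()));
              entriesAdded = page.size() - 1;
            }
            startKey = all.getLast();
          }
        } while (entriesAdded > 0);
        System.out.println(all); // prints [a, b, c, d, e, f, g]
      }
    }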
* @throws IOException */ - private List listFileStatus(Path f, String startPath) + private List listFileStatus(Path f, String startPath, boolean lite) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_STATUS, 1); statistics.incrementReadOps(1); @@ -1097,7 +1106,7 @@ private List listFileStatus(Path f, String startPath) List statusList; statusList = adapter.listStatus(pathToKey(f), false, startPath, - listingPageSize, uri, workingDir, getUsername()) + listingPageSize, uri, workingDir, getUsername(), lite) .stream() .map(this::convertFileStatus) .collect(Collectors.toList()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 14c297d9f47..9896ab722de 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -32,6 +32,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -49,7 +50,6 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; @@ -65,50 +65,48 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneFsServerDefaults; +import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.OzoneSnapshot; +import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import 
org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; - -import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .BUCKET_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .VOLUME_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; @@ -221,7 +219,15 @@ public BasicRootedOzoneClientAdapterImpl(String omHost, int omPort, OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); // Fetches the bucket layout to be used by OFS. - initDefaultFsBucketLayout(conf); + try { + initDefaultFsBucketLayout(conf); + } catch (IOException | RuntimeException exception) { + // in case of exception, the adapter object will not be + // initialised making the client object unreachable, close the client + // to release resources in this case and rethrow. + ozoneClient.close(); + throw exception; + } config = conf; } finally { @@ -260,8 +266,11 @@ private void initDefaultFsBucketLayout(OzoneConfiguration conf) } } - OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist)throws IOException { - return getBucket(ofsPath.getVolumeName(), ofsPath.getBucketName(), createIfNotExist); + OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist) + throws IOException { + + return getBucket(ofsPath.getVolumeName(), ofsPath.getBucketName(), + createIfNotExist); } /** @@ -273,7 +282,8 @@ OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist)throws IOExcepti * @throws IOException Exceptions other than OMException with result code * VOLUME_NOT_FOUND or BUCKET_NOT_FOUND. 
*/ - private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean createIfNotExist) throws IOException { + private OzoneBucket getBucket(String volumeStr, String bucketStr, + boolean createIfNotExist) throws IOException { Preconditions.checkNotNull(volumeStr); Preconditions.checkNotNull(bucketStr); @@ -283,7 +293,7 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean create "getBucket: Invalid argument: given bucket string is empty."); } - OzoneBucket bucket = null; + OzoneBucket bucket; try { bucket = proxy.getBucketDetails(volumeStr, bucketStr); @@ -295,8 +305,44 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean create OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); } catch (OMException ex) { if (createIfNotExist) { - handleVolumeOrBucketCreationOnException(volumeStr, bucketStr, ex); - // Try to get the bucket again + // getBucketDetails can throw VOLUME_NOT_FOUND when the parent volume + // doesn't exist and ACL is enabled; it can only throw BUCKET_NOT_FOUND + // when ACL is disabled. Both exceptions need to be handled. + switch (ex.getResult()) { + case VOLUME_NOT_FOUND: + // Create the volume first when the volume doesn't exist + try { + objectStore.createVolume(volumeStr); + } catch (OMException newVolEx) { + // Ignore the case where another client created the volume + if (!newVolEx.getResult().equals(VOLUME_ALREADY_EXISTS)) { + throw newVolEx; + } + } + // No break here. Proceed to create the bucket + case BUCKET_NOT_FOUND: + // When BUCKET_NOT_FOUND is thrown, we expect the parent volume + // exists, so that we don't call create volume and incur + // unnecessary ACL checks which could lead to unwanted behavior. + OzoneVolume volume = proxy.getVolumeDetails(volumeStr); + // Create the bucket + try { + // Buckets created by OFS should be in FSO layout + volume.createBucket(bucketStr, + BucketArgs.newBuilder().setBucketLayout( + this.defaultOFSBucketLayout).build()); + } catch (OMException newBucEx) { + // Ignore the case where another client created the bucket + if (!newBucEx.getResult().equals(BUCKET_ALREADY_EXISTS)) { + throw newBucEx; + } + } + break; + default: + // Throw unhandled exception + throw ex; + } + // Try get bucket again bucket = proxy.getBucketDetails(volumeStr, bucketStr); } else { throw ex; @@ -306,41 +352,6 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean create return bucket; } - private void handleVolumeOrBucketCreationOnException(String volumeStr, String bucketStr, OMException ex) - throws IOException { - // OM can throw VOLUME_NOT_FOUND when the parent volume does not exist, and in this case we may create the volume, - // OM can also throw BUCKET_NOT_FOUND when the parent bucket does not exist, and so we also may create the bucket. - // This method creates the volume and the bucket when an exception marks that they don't exist. - switch (ex.getResult()) { - case VOLUME_NOT_FOUND: - // Create the volume first when the volume doesn't exist - try { - objectStore.createVolume(volumeStr); - } catch (OMException newVolEx) { - // Ignore the case where another client created the volume - if (!newVolEx.getResult().equals(VOLUME_ALREADY_EXISTS)) { - throw newVolEx; - } - } - // No break here. 
Proceed to create the bucket - case BUCKET_NOT_FOUND: - // Create the bucket - try { - // Buckets created by OFS should be in FSO layout - BucketArgs defaultBucketArgs = BucketArgs.newBuilder().setBucketLayout(this.defaultOFSBucketLayout).build(); - proxy.createBucket(volumeStr, bucketStr, defaultBucketArgs); - } catch (OMException newBucEx) { - // Ignore the case where another client created the bucket - if (!newBucEx.getResult().equals(BUCKET_ALREADY_EXISTS)) { - throw newBucEx; - } - } - break; - default: - throw ex; - } - } - /** * This API returns the value what is configured at client side only. It could * differ from the server side default values. If no replication config @@ -510,40 +521,30 @@ public boolean createDirectory(String pathStr) throws IOException { LOG.trace("creating dir for path: {}", pathStr); incrementCounter(Statistic.OBJECTS_CREATED, 1); OFSPath ofsPath = new OFSPath(pathStr, config); - - String volumeName = ofsPath.getVolumeName(); - if (volumeName.isEmpty()) { + if (ofsPath.getVolumeName().isEmpty()) { // Volume name unspecified, invalid param, return failure return false; } - - String bucketName = ofsPath.getBucketName(); - if (bucketName.isEmpty()) { - // Create volume only as path only contains one element the volume. - objectStore.createVolume(volumeName); + if (ofsPath.getBucketName().isEmpty()) { + // Create volume only + objectStore.createVolume(ofsPath.getVolumeName()); return true; } - String keyStr = ofsPath.getKeyName(); try { - if (keyStr == null || keyStr.isEmpty()) { - // This is the case when the given path only contains volume and bucket. - // If the bucket does not exist, then this will throw and bucket will be created - // in handleVolumeOrBucketCreationOnException later. - proxy.getBucketDetails(volumeName, bucketName); - } else { - proxy.createDirectory(volumeName, bucketName, keyStr); + OzoneBucket bucket = getBucket(ofsPath, true); + // Empty keyStr here indicates only volume and bucket is + // given in pathStr, so getBucket above should handle the creation + // of volume and bucket. We won't feed empty keyStr to + // bucket.createDirectory as that would be a NPE. + if (keyStr != null && keyStr.length() > 0) { + bucket.createDirectory(keyStr); } } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS) { throw new FileAlreadyExistsException(e.getMessage()); } - // Create volume and bucket if they do not exist, and retry key creation. - // This call will throw an exception if it fails, or the exception is different than it handles. 
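A note on the getBucket change above: the switch intentionally falls through from VOLUME_NOT_FOUND to BUCKET_NOT_FOUND so a single pass can create both missing parents, and each create call tolerates losing the race to another client. The same idea expressed as two standalone helpers, a hedged sketch only; the helper names are hypothetical, the client calls are the ones used in the patch.

    import java.io.IOException;
    import org.apache.hadoop.ozone.client.BucketArgs;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneVolume;
    import org.apache.hadoop.ozone.om.exceptions.OMException;
    import org.apache.hadoop.ozone.om.helpers.BucketLayout;

    final class EnsureExists {
      private EnsureExists() { }

      /** Create the volume unless another client already did. */
      static void createVolumeIfAbsent(ObjectStore store, String volume) throws IOException {
        try {
          store.createVolume(volume);
        } catch (OMException e) {
          if (e.getResult() != OMException.ResultCodes.VOLUME_ALREADY_EXISTS) {
            throw e;
          }
        }
      }

      /** Create the bucket with the given layout unless another client already did. */
      static void createBucketIfAbsent(OzoneVolume volume, String bucket, BucketLayout layout)
          throws IOException {
        try {
          volume.createBucket(bucket, BucketArgs.newBuilder().setBucketLayout(layout).build());
        } catch (OMException e) {
          if (e.getResult() != OMException.ResultCodes.BUCKET_ALREADY_EXISTS) {
            throw e;
          }
        }
      }
    }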
- handleVolumeOrBucketCreationOnException(volumeName, bucketName, e); - if (keyStr != null && !keyStr.isEmpty()) { - proxy.createDirectory(volumeName, bucketName, keyStr); - } + throw e; } return true; } @@ -719,7 +720,7 @@ private FileStatusAdapter getFileStatusForKeyOrSnapshot(OFSPath ofsPath, URI uri * * @param allUsers return trashRoots of all users if true, used by emptier * @param fs Pointer to the current OFS FileSystem - * @return + * @return {@code Collection} */ public Collection getTrashRoots(boolean allUsers, BasicRootedOzoneFileSystem fs) { @@ -785,7 +786,7 @@ public Iterator listKeys(String pathStr) throws IOException { */ private List listStatusRoot( boolean recursive, String startPath, long numEntries, - URI uri, Path workingDir, String username) throws IOException { + URI uri, Path workingDir, String username, boolean lite) throws IOException { OFSPath ofsStartPath = new OFSPath(startPath, config); // list volumes @@ -798,7 +799,7 @@ private List listStatusRoot( if (recursive) { String pathStrNextVolume = volume.getName(); res.addAll(listStatus(pathStrNextVolume, recursive, startPath, - numEntries - res.size(), uri, workingDir, username)); + numEntries - res.size(), uri, workingDir, username, lite)); } } return res; @@ -807,9 +808,10 @@ private List listStatusRoot( /** * Helper for OFS listStatus on a volume. */ + @SuppressWarnings("checkstyle:ParameterNumber") private List listStatusVolume(String volumeStr, boolean recursive, String startPath, long numEntries, - URI uri, Path workingDir, String username) throws IOException { + URI uri, Path workingDir, String username, boolean lite) throws IOException { OFSPath ofsStartPath = new OFSPath(startPath, config); // list buckets in the volume @@ -823,7 +825,7 @@ private List listStatusVolume(String volumeStr, if (recursive) { String pathStrNext = volumeStr + OZONE_URI_DELIMITER + bucket.getName(); res.addAll(listStatus(pathStrNext, recursive, startPath, - numEntries - res.size(), uri, workingDir, username)); + numEntries - res.size(), uri, workingDir, username, lite)); } } return res; @@ -833,7 +835,7 @@ private List listStatusVolume(String volumeStr, * Helper for OFS listStatus on a bucket to get all snapshots. */ private List listStatusBucketSnapshot( - String volumeName, String bucketName, URI uri) throws IOException { + String volumeName, String bucketName, URI uri, String prevSnapshot, long numberOfEntries) throws IOException { OzoneBucket ozoneBucket = getBucket(volumeName, bucketName, false); UserGroupInformation ugi = @@ -842,9 +844,9 @@ private List listStatusBucketSnapshot( String group = getGroupName(ugi); List res = new ArrayList<>(); - Iterator snapshotIter = objectStore.listSnapshot(volumeName, bucketName, null, null); + Iterator snapshotIter = objectStore.listSnapshot(volumeName, bucketName, null, prevSnapshot); - while (snapshotIter.hasNext()) { + while (snapshotIter.hasNext() && res.size() < numberOfEntries) { OzoneSnapshot ozoneSnapshot = snapshotIter.next(); if (SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE.name().equals(ozoneSnapshot.getSnapshotStatus())) { res.add(getFileStatusAdapterForBucketSnapshot( @@ -875,13 +877,15 @@ private List listStatusBucketSnapshot( * Used in making the return path qualified. * @param username User name. * Used in making the return path qualified. + * @param lite true if lightweight response needs to be returned otherwise false. * @return A list of FileStatusAdapter. * @throws IOException Bucket exception or FileNotFoundException. 
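With listStatusBucketSnapshot now honouring a start snapshot and an entry limit, callers can page through the ".snapshot" pseudo-directory instead of always pulling every snapshot. A minimal sketch of the underlying client call, not part of the patch, assuming `objectStore` is an initialized ObjectStore and that OzoneSnapshot exposes getName() and getSnapshotStatus() as used above; the volume, bucket, and limit values are placeholders.

    String prevSnapshot = null;   // start from the beginning
    int limit = 100;              // arbitrary page size for the example
    List<String> activeSnapshots = new ArrayList<>();
    Iterator<? extends OzoneSnapshot> it =
        objectStore.listSnapshot("vol1", "bucket1", null, prevSnapshot);
    while (it.hasNext() && activeSnapshots.size() < limit) {
      OzoneSnapshot s = it.next();
      if (SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE.name().equals(s.getSnapshotStatus())) {
        activeSnapshots.add(s.getName());
      }
    }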
*/ + @SuppressWarnings("checkstyle:ParameterNumber") @Override public List listStatus(String pathStr, boolean recursive, String startPath, long numEntries, URI uri, - Path workingDir, String username) throws IOException { + Path workingDir, String username, boolean lite) throws IOException { incrementCounter(Statistic.OBJECTS_LIST, 1); // Remove authority from startPath if it exists @@ -900,44 +904,53 @@ public List listStatus(String pathStr, boolean recursive, OFSPath ofsPath = new OFSPath(pathStr, config); if (ofsPath.isRoot()) { return listStatusRoot( - recursive, startPath, numEntries, uri, workingDir, username); + recursive, startPath, numEntries, uri, workingDir, username, lite); } OFSPath ofsStartPath = new OFSPath(startPath, config); if (ofsPath.isVolume()) { String startBucketPath = ofsStartPath.getNonKeyPath(); return listStatusVolume(ofsPath.getVolumeName(), - recursive, startBucketPath, numEntries, uri, workingDir, username); + recursive, startBucketPath, numEntries, uri, workingDir, username, lite); } if (ofsPath.isSnapshotPath()) { return listStatusBucketSnapshot(ofsPath.getVolumeName(), - ofsPath.getBucketName(), uri); + ofsPath.getBucketName(), uri, ofsStartPath.getSnapshotName(), numEntries); } - + List result = new ArrayList<>(); String keyName = ofsPath.getKeyName(); // Internally we need startKey to be passed into bucket.listStatus String startKey = ofsStartPath.getKeyName(); try { OzoneBucket bucket = getBucket(ofsPath, false); - List statuses; + List statuses = Collections.emptyList(); + List lightStatuses = Collections.emptyList(); if (bucket.isSourcePathExist()) { - statuses = bucket - .listStatus(keyName, recursive, startKey, numEntries); + if (lite) { + lightStatuses = bucket.listStatusLight(keyName, recursive, startKey, numEntries); + } else { + statuses = bucket.listStatus(keyName, recursive, startKey, numEntries); + } + } else { LOG.warn("Source Bucket does not exist, link bucket {} is orphan " + "and returning empty list of files inside it", bucket.getName()); - statuses = Collections.emptyList(); } // Note: result in statuses above doesn't have volume/bucket path since // they are from the server. String ofsPathPrefix = ofsPath.getNonKeyPath(); - List result = new ArrayList<>(); - for (OzoneFileStatus status : statuses) { - result.add(toFileStatusAdapter(status, username, uri, workingDir, - ofsPathPrefix)); + if (lite) { + for (OzoneFileStatusLight status : lightStatuses) { + result.add(toFileStatusAdapter(status, username, uri, workingDir, ofsPathPrefix)); + } + } else { + for (OzoneFileStatus status : statuses) { + result.add(toFileStatusAdapter(status, username, uri, workingDir, ofsPathPrefix)); + } } + return result; } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) { @@ -1040,6 +1053,31 @@ private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status, ); } + private FileStatusAdapter toFileStatusAdapter(OzoneFileStatusLight status, + String owner, URI defaultUri, Path workingDir, String ofsPathPrefix) { + BasicOmKeyInfo keyInfo = status.getKeyInfo(); + short replication = (short) keyInfo.getReplicationConfig() + .getRequiredNodes(); + return new FileStatusAdapter( + keyInfo.getDataSize(), + keyInfo.getReplicatedSize(), + new Path(ofsPathPrefix + OZONE_URI_DELIMITER + keyInfo.getKeyName()) + .makeQualified(defaultUri, workingDir), + status.isDirectory(), + replication, + status.getBlockSize(), + keyInfo.getModificationTime(), + keyInfo.getModificationTime(), + status.isDirectory() ? 
(short) 00777 : (short) 00666, + StringUtils.defaultIfEmpty(keyInfo.getOwnerName(), owner), + owner, + null, + getBlockLocations(null), + false, + OzoneClientUtils.isKeyErasureCode(keyInfo) + ); + } + /** * Helper method to get List of BlockLocation from OM Key info. * @param fileStatus Ozone key file status. @@ -1076,16 +1114,15 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) { omKeyLocationInfo.getPipeline().getNodes() .forEach(dn -> { hostList.add(dn.getHostName()); - int port = dn.getPort( - DatanodeDetails.Port.Name.STANDALONE).getValue(); + int port = dn.getStandalonePort().getValue(); if (port == 0) { port = configuredDnPort; } nameList.add(dn.getHostName() + ":" + port); }); - String[] hosts = hostList.toArray(new String[hostList.size()]); - String[] names = nameList.toArray(new String[nameList.size()]); + String[] hosts = hostList.toArray(new String[0]); + String[] names = nameList.toArray(new String[0]); BlockLocation blockLocation = new BlockLocation( names, hosts, offsetOfBlockInFile, omKeyLocationInfo.getLength()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index eb346b5edc5..66b0037cf33 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs.ozone; +import com.google.common.base.Function; import com.google.common.base.Preconditions; import io.opentracing.Span; import io.opentracing.util.GlobalTracer; @@ -153,9 +154,6 @@ public void initialize(URI name, Configuration conf) throws IOException { OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD, OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES); - hsyncEnabled = conf.getBoolean( - OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, - OZONE_FS_HSYNC_ENABLED_DEFAULT); setConf(conf); Preconditions.checkNotNull(name.getScheme(), "No scheme provided in %s", name); @@ -192,6 +190,8 @@ public void initialize(URI name, Configuration conf) throws IOException { LOG.trace("Ozone URI for OFS initialization is " + uri); ConfigurationSource source = getConfSource(); + this.hsyncEnabled = OzoneFSUtils.canEnableHsync(source, true); + LOG.debug("hsyncEnabled = {}", hsyncEnabled); this.adapter = createAdapter(source, omHostOrServiceId, omPort); this.adapterImpl = (BasicRootedOzoneClientAdapterImpl) this.adapter; @@ -916,7 +916,7 @@ private boolean o3Exists(final Path f) throws IOException { @Override public FileStatus[] listStatus(Path f) throws IOException { return TracingUtil.executeInNewSpan("ofs listStatus", - () -> convertFileStatusArr(listStatusAdapter(f))); + () -> convertFileStatusArr(listStatusAdapter(f, true))); } private FileStatus[] convertFileStatusArr( @@ -930,7 +930,7 @@ private FileStatus[] convertFileStatusArr( } - public List listStatusAdapter(Path f) throws IOException { + private List listStatusAdapter(Path f, boolean lite) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_STATUS, 1); statistics.incrementReadOps(1); LOG.trace("listStatus() path:{}", f); @@ -938,25 +938,27 @@ public List listStatusAdapter(Path f) throws IOException { LinkedList statuses = new LinkedList<>(); List tmpStatusList; String startPath = ""; - + int entriesAdded; do { tmpStatusList = 
adapter.listStatus(pathToKey(f), false, startPath, - numEntries, uri, workingDir, getUsername()); - + numEntries, uri, workingDir, getUsername(), lite); + entriesAdded = 0; if (!tmpStatusList.isEmpty()) { if (startPath.isEmpty() || !statuses.getLast().getPath().toString() .equals(tmpStatusList.get(0).getPath().toString())) { statuses.addAll(tmpStatusList); + entriesAdded += tmpStatusList.size(); } else { statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size())); + entriesAdded += tmpStatusList.size() - 1; } startPath = pathToKey(statuses.getLast().getPath()); } // listStatus returns entries numEntries in size if available. // Any lesser number of entries indicate that the required entries have // exhausted. - } while (tmpStatusList.size() == numEntries); + } while (entriesAdded > 0); return statuses; } @@ -1179,7 +1181,9 @@ public RemoteIterator listFiles(Path f, boolean recursive) public RemoteIterator listLocatedStatus(Path f) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_LOCATED_STATUS); - return super.listLocatedStatus(f); + return new OzoneFileStatusIterator<>(f, + (stat) -> stat instanceof LocatedFileStatus ? (LocatedFileStatus) stat : new LocatedFileStatus(stat, null), + false); } @Override @@ -1194,7 +1198,7 @@ public RemoteIterator listStatusIterator(Path f) "Instead use 'ozone sh key list " + "' command"); } - return new OzoneFileStatusIterator<>(f); + return new OzoneFileStatusIterator<>(f, stat -> stat, true); } /** @@ -1204,23 +1208,29 @@ public RemoteIterator listStatusIterator(Path f) */ private final class OzoneFileStatusIterator implements RemoteIterator { + private final Function transformFunc; private List thisListing; private int i; private Path p; private T curStat = null; private String startPath = ""; + private boolean lite; /** * Constructor to initialize OzoneFileStatusIterator. * Get the first batch of entry for iteration. * * @param p path to file/directory. + * @param transformFunc function to convert FileStatus into an expected type. + * @param lite if true it should look into fetching a lightweight keys from server. 
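The iterator rework above is what lets listLocatedStatus share the paging logic: the constructor now takes a transform, so listStatusIterator passes the identity function (and requests the lightweight listing), while listLocatedStatus wraps each entry in a LocatedFileStatus with no block locations. The compact sketch below shows those two transforms in isolation; it assumes a Hadoop 3 client where the LocatedFileStatus(FileStatus, BlockLocation[]) constructor declares no checked exception (the patch itself relies on this inside a Guava Function), and it uses java.util.function.Function only to keep the snippet self-contained.

    import java.util.function.Function;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.LocatedFileStatus;

    final class LocatedStatusTransforms {
      private LocatedStatusTransforms() { }

      /** Identity transform, as used by listStatusIterator. */
      static final Function<FileStatus, FileStatus> IDENTITY = stat -> stat;

      /** Wrapping transform, as used by listLocatedStatus; locations stay null. */
      static final Function<FileStatus, LocatedFileStatus> TO_LOCATED = stat ->
          stat instanceof LocatedFileStatus
              ? (LocatedFileStatus) stat
              : new LocatedFileStatus(stat, null);
    }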
* @throws IOException */ - private OzoneFileStatusIterator(Path p) throws IOException { + private OzoneFileStatusIterator(Path p, Function transformFunc, boolean lite) throws IOException { this.p = p; + this.lite = lite; + this.transformFunc = transformFunc; // fetch the first batch of entries in the directory - thisListing = listFileStatus(p, startPath); + thisListing = listFileStatus(p, startPath, lite); if (thisListing != null && !thisListing.isEmpty()) { startPath = pathToKey( thisListing.get(thisListing.size() - 1).getPath()); @@ -1239,7 +1249,7 @@ public boolean hasNext() throws IOException { while (curStat == null && hasNextNoFilter()) { T next; FileStatus fileStat = thisListing.get(i++); - next = (T) (fileStat); + next = transformFunc.apply(fileStat); curStat = next; } return curStat != null; @@ -1257,10 +1267,9 @@ private boolean hasNextNoFilter() throws IOException { return false; } if (i >= thisListing.size()) { - if (startPath != null && (thisListing.size() == listingPageSize || - thisListing.size() == listingPageSize - 1)) { + if (startPath != null && (!thisListing.isEmpty())) { // current listing is exhausted & fetch a new listing - thisListing = listFileStatus(p, startPath); + thisListing = listFileStatus(p, startPath, lite); if (thisListing != null && !thisListing.isEmpty()) { startPath = pathToKey( thisListing.get(thisListing.size() - 1).getPath()); @@ -1295,10 +1304,11 @@ public T next() throws IOException { * * @param f * @param startPath + * @param lite if true return lightweight keys * @return list of file status. * @throws IOException */ - private List listFileStatus(Path f, String startPath) + private List listFileStatus(Path f, String startPath, boolean lite) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_STATUS, 1); statistics.incrementReadOps(1); @@ -1306,7 +1316,7 @@ private List listFileStatus(Path f, String startPath) List statusList; statusList = adapter.listStatus(pathToKey(f), false, startPath, - listingPageSize, uri, workingDir, getUsername()) + listingPageSize, uri, workingDir, getUsername(), lite) .stream() .map(this::convertFileStatus) .collect(Collectors.toList()); @@ -1443,7 +1453,7 @@ boolean iterate() throws IOException { ofsPath.getNonKeyPathNoPrefixDelim() + OZONE_URI_DELIMITER; if (isFSO) { List fileStatuses; - fileStatuses = listStatusAdapter(path); + fileStatuses = listStatusAdapter(path, true); for (FileStatusAdapter fileStatus : fileStatuses) { String keyName = new OFSPath(fileStatus.getPath().toString(), @@ -1568,7 +1578,7 @@ private ContentSummary getContentSummaryInSpan(Path f) throws IOException { // f is a directory long[] summary = {0, 0, 0, 1}; int i = 0; - for (FileStatusAdapter s : listStatusAdapter(f)) { + for (FileStatusAdapter s : listStatusAdapter(f, true)) { long length = s.getLength(); long spaceConsumed = s.getDiskConsumed(); ContentSummary c = s.isDir() ? getContentSummary(s.getPath()) : diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java index f92f8d95704..6354ee0eebe 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java @@ -33,7 +33,7 @@ * information can be converted to this class, and this class can be used to * create hadoop 2.x FileStatus. *

    - * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x) + * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x) */ public final class FileStatusAdapter { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java index e468ac498c4..24ff692e1b4 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -69,9 +69,10 @@ OzoneFSDataStreamOutput createStreamFile(String key, short replication, Iterator listKeys(String pathKey) throws IOException; + @SuppressWarnings("checkstyle:ParameterNumber") List listStatus(String keyName, boolean recursive, String startKey, long numEntries, URI uri, - Path workingDir, String username) throws IOException; + Path workingDir, String username, boolean lite) throws IOException; Token getDelegationToken(String renewer) throws IOException; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java index 383ad6db495..6c9fb3ccc7b 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.client.checksum.ChecksumHelperFactory; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -246,6 +247,11 @@ public static boolean isKeyErasureCode(OmKeyInfo keyInfo) { HddsProtos.ReplicationType.EC; } + public static boolean isKeyErasureCode(BasicOmKeyInfo keyInfo) { + return keyInfo.getReplicationConfig().getReplicationType() == + HddsProtos.ReplicationType.EC; + } + public static boolean isKeyEncrypted(OmKeyInfo keyInfo) { return !Objects.isNull(keyInfo.getFileEncryptionInfo()); } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index 4dc70bfa569..f873b43ae98 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -193,7 +193,6 @@ public int read(long position, ByteBuffer buf) throws IOException { /** * @param buf the ByteBuffer to receive the results of the read operation. 
* @param position offset - * @return void * @throws IOException if there is some error performing the read * @throws EOFException if end of file reached before reading fully */ diff --git a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsShell.java b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsShell.java index a15da5228f3..96506933952 100644 --- a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsShell.java +++ b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsShell.java @@ -22,7 +22,6 @@ import org.apache.hadoop.util.ToolRunner; import java.io.ByteArrayOutputStream; -import java.io.IOException; import java.io.PrintStream; import java.util.Arrays; @@ -32,7 +31,6 @@ import org.junit.jupiter.api.Test; - /** * Tests the behavior of OzoneFsShell. */ @@ -40,7 +38,7 @@ public class TestOzoneFsShell { // tests command handler for FsShell bound to OzoneDelete class @Test - public void testOzoneFsShellRegisterDeleteCmd() throws IOException { + public void testOzoneFsShellRegisterDeleteCmd() throws Exception { final String rmCmdName = "rm"; final String rmCmd = "-" + rmCmdName; final String arg = "arg1"; @@ -52,16 +50,17 @@ public void testOzoneFsShellRegisterDeleteCmd() throws IOException { System.setErr(bytesPrintStream); try { ToolRunner.run(shell, argv); - } catch (Exception e) { - } finally { + // test command bindings for "rm" command handled by OzoneDelete class CommandFactory factory = shell.getCommandFactory(); + assertNotNull(factory); assertEquals(1, Arrays.stream(factory.getNames()) .filter(c -> c.equals(rmCmd)).count()); Command instance = factory.getInstance(rmCmd); assertNotNull(instance); assertEquals(OzoneFsDelete.Rm.class, instance.getClass()); assertEquals(rmCmdName, instance.getCommandName()); + } finally { shell.close(); System.setErr(oldErr); } diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index fad83ea86c1..8585a9dd544 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-hadoop2 Apache Ozone FS Hadoop 2.x compatibility jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java index b5012f95c4e..e1cb391da53 100644 --- a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java +++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java @@ -45,4 +45,13 @@ public OzFs(URI theUri, Configuration conf) public int getUriDefaultPort() { return -1; } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. 
+ */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java index 4cd04da9c86..0f421a85523 100644 --- a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java +++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -46,4 +46,13 @@ public RootedOzFs(URI theUri, Configuration conf) public int getUriDefaultPort() { return -1; } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. + */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml index f27bd411db7..2f23a5d318e 100644 --- a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml @@ -19,7 +19,7 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java index 914832e2cfa..b1e046547fa 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java @@ -40,4 +40,13 @@ public OzFs(URI theUri, Configuration conf) super(theUri, new OzoneFileSystem(), conf, OzoneConsts.OZONE_URI_SCHEME, false); } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. + */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java index 076287eaac1..81bbaacd7c8 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -41,4 +41,13 @@ public RootedOzFs(URI theUri, Configuration conf) super(theUri, new RootedOzoneFileSystem(), conf, OzoneConsts.OZONE_OFS_URI_SCHEME, false); } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. 
+ */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index df6c724883c..9e77ffd7c33 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-shaded Apache Ozone FileSystem Shaded jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT true @@ -78,6 +78,12 @@ + + com.google.protobuf + protobuf-java + 2.5.0 + compile + diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index 176f21b9860..aa554c422e5 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem Apache Ozone FileSystem jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java index 346b994a3ae..548e11f5d48 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java @@ -41,4 +41,13 @@ public OzFs(URI theUri, Configuration conf) super(theUri, new OzoneFileSystem(), conf, OzoneConsts.OZONE_URI_SCHEME, false); } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. + */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java index 076287eaac1..81bbaacd7c8 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -41,4 +41,13 @@ public RootedOzFs(URI theUri, Configuration conf) super(theUri, new RootedOzoneFileSystem(), conf, OzoneConsts.OZONE_OFS_URI_SCHEME, false); } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. 
+ */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index e262895664f..fdd9b2734cd 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -16,16 +16,17 @@ org.apache.ozone ozone-main - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Project Apache Ozone pom apache/ozone:${project.version} + true interface-client @@ -38,6 +39,8 @@ ozonefs-common ozonefs datanode + recon + recon-codegen s3gateway dist csi @@ -47,13 +50,6 @@ s3-secret-store - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - - @@ -342,6 +338,29 @@ + + org.apache.maven.plugins + maven-remote-resources-plugin + + + org.apache.ozone:ozone-dev-support:${ozone.version} + + + + + org.apache.ozone + ozone-dev-support + ${ozone.version} + + + + + + process + + + + @@ -373,18 +392,6 @@ ozonefs-hadoop2 - - build-with-recon - - - !skipRecon - - - - recon - recon-codegen - - parallel-tests @@ -432,36 +439,5 @@ - - add-classpath-descriptor - - - src/main/java - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - add-classpath-descriptor - prepare-package - - build-classpath - - - ${project.build.outputDirectory}/${project.artifactId}.classpath - $HDDS_LIB_JARS_DIR - true - runtime - - - - - - - diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml index bb7756a9de3..b8345c7d343 100644 --- a/hadoop-ozone/recon-codegen/pom.xml +++ b/hadoop-ozone/recon-codegen/pom.xml @@ -18,7 +18,7 @@ ozone org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 4.0.0 ozone-reconcodegen diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java index 8272c2bd6da..d59ab8acd6b 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java @@ -22,6 +22,7 @@ import org.hadoop.ozone.recon.schema.ReconSchemaDefinition; import org.hadoop.ozone.recon.schema.StatsSchemaDefinition; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; +import org.hadoop.ozone.recon.schema.SchemaVersionTableDefinition; import com.google.inject.AbstractModule; import com.google.inject.multibindings.Multibinder; @@ -40,5 +41,6 @@ protected void configure() { schemaBinder.addBinding().to(ContainerSchemaDefinition.class); schemaBinder.addBinding().to(ReconTaskSchemaDefinition.class); schemaBinder.addBinding().to(StatsSchemaDefinition.class); + schemaBinder.addBinding().to(SchemaVersionTableDefinition.class); } } diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java index a3675dcbe77..7fb98be4a1d 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java @@ -24,8 +24,11 @@ import java.io.OutputStream; import java.sql.Connection; import java.sql.DriverManager; +import java.sql.ResultSet; import java.sql.SQLException; import java.util.function.BiPredicate; +import java.util.ArrayList; +import java.util.List; import 
org.jooq.exception.DataAccessException; import org.jooq.impl.DSL; @@ -95,4 +98,23 @@ public void write(int b) throws IOException { LOG.info("{} table already exists, skipping creation.", tableName); return true; }; + + /** + * Utility method to list all user-defined tables in the database. + * + * @param connection The database connection to use. + * @return A list of table names (user-defined tables only). + * @throws SQLException If there is an issue accessing the database metadata. + */ + public static List listAllTables(Connection connection) throws SQLException { + List tableNames = new ArrayList<>(); + try (ResultSet resultSet = connection.getMetaData().getTables(null, null, null, new String[]{"TABLE"})) { + while (resultSet.next()) { + String tableName = resultSet.getString("TABLE_NAME"); + tableNames.add(tableName); + } + } + LOG.debug("Found {} user-defined tables in the database: {}", tableNames.size(), tableNames); + return tableNames; + } } diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 4d62ca886cd..0c778aead5d 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -51,7 +51,7 @@ public enum UnHealthyContainerStates { UNDER_REPLICATED, OVER_REPLICATED, MIS_REPLICATED, - ALL_REPLICAS_UNHEALTHY, + ALL_REPLICAS_BAD, NEGATIVE_SIZE // Added new state to track containers with negative sizes } @@ -97,4 +97,8 @@ private void createUnhealthyContainersTable() { public DSLContext getDSLContext() { return dslContext; } + + public DataSource getDataSource() { + return dataSource; + } } diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/SchemaVersionTableDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/SchemaVersionTableDefinition.java new file mode 100644 index 00000000000..6545a539038 --- /dev/null +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/SchemaVersionTableDefinition.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.hadoop.ozone.recon.schema; + +import com.google.inject.Inject; +import com.google.inject.Singleton; +import org.jooq.DSLContext; +import org.jooq.impl.DSL; +import org.jooq.impl.SQLDataType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; + +import static org.hadoop.ozone.recon.codegen.SqlDbUtils.TABLE_EXISTS_CHECK; +import static org.hadoop.ozone.recon.codegen.SqlDbUtils.listAllTables; +import static org.jooq.impl.DSL.name; + +/** + * Class for managing the schema of the SchemaVersion table. + */ +@Singleton +public class SchemaVersionTableDefinition implements ReconSchemaDefinition { + + private static final Logger LOG = LoggerFactory.getLogger(SchemaVersionTableDefinition.class); + + public static final String SCHEMA_VERSION_TABLE_NAME = "RECON_SCHEMA_VERSION"; + private final DataSource dataSource; + private int latestSLV; + + @Inject + public SchemaVersionTableDefinition(DataSource dataSource) { + this.dataSource = dataSource; + } + + @Override + public void initializeSchema() throws SQLException { + try (Connection conn = dataSource.getConnection()) { + DSLContext localDslContext = DSL.using(conn); + + if (!TABLE_EXISTS_CHECK.test(conn, SCHEMA_VERSION_TABLE_NAME)) { + // If the RECON_SCHEMA_VERSION table does not exist, check for other tables + // to identify if it is a fresh install + boolean isFreshInstall = listAllTables(conn).isEmpty(); + createSchemaVersionTable(localDslContext); + + if (isFreshInstall) { + // Fresh install: Set the SLV to the latest version + insertInitialSLV(localDslContext, latestSLV); + } + } + } + } + + /** + * Create the Schema Version table. + * + * @param dslContext The DSLContext to use for the operation. + */ + private void createSchemaVersionTable(DSLContext dslContext) { + dslContext.createTableIfNotExists(SCHEMA_VERSION_TABLE_NAME) + .column("version_number", SQLDataType.INTEGER.nullable(false)) + .column("applied_on", SQLDataType.TIMESTAMP.defaultValue(DSL.currentTimestamp())) + .execute(); + } + + /** + * Inserts the initial SLV into the Schema Version table. + * + * @param dslContext The DSLContext to use for the operation. + * @param slv The initial SLV value. + */ + private void insertInitialSLV(DSLContext dslContext, int slv) { + dslContext.insertInto(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .columns(DSL.field(name("version_number")), + DSL.field(name("applied_on"))) + .values(slv, DSL.currentTimestamp()) + .execute(); + LOG.info("Inserted initial SLV '{}' into SchemaVersion table.", slv); + } + + /** + * Set the latest SLV. + * + * @param slv The latest Software Layout Version. 
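+   *            Expected to be set before {@link #initializeSchema()} runs, so that a
+   *            fresh install records the correct latest version.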
+ */ + public void setLatestSLV(int slv) { + this.latestSLV = slv; + } +} diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index a24252c1ed6..f203689b669 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -18,12 +18,13 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Recon 4.0.0 ozone-recon + false 8.15.7 @@ -209,6 +210,9 @@ ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build + + static/** + true @@ -229,6 +233,7 @@ + woff woff2 diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java index 6312365bf4b..9f0a9796e28 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java @@ -53,6 +53,8 @@ private static void addDeprecations() { @VisibleForTesting public static void setConfiguration(OzoneConfiguration conf) { + // Nullity check is used in case the configuration was already set + // in the MiniOzoneCluster if (configuration == null) { ConfigurationProvider.configuration = conf; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index ed657931e03..5768166c950 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -43,20 +43,20 @@ private ReconConstants() { public static final int DISK_USAGE_TOP_RECORDS_LIMIT = 30; public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false"; public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false"; - public static final String DEFAULT_START_PREFIX = "/"; public static final String DEFAULT_FETCH_COUNT = "1000"; public static final String DEFAULT_KEY_SIZE = "0"; public static final String DEFAULT_BATCH_NUMBER = "1"; public static final String RECON_QUERY_BATCH_PARAM = "batchNum"; public static final String RECON_QUERY_PREVKEY = "prevKey"; + public static final String RECON_QUERY_START_PREFIX = "startPrefix"; public static final String RECON_OPEN_KEY_INCLUDE_NON_FSO = "includeNonFso"; public static final String RECON_OPEN_KEY_INCLUDE_FSO = "includeFso"; - public static final String RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT = "1000"; - public static final String RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY = ""; + public static final String RECON_OM_INSIGHTS_DEFAULT_START_PREFIX = "/"; + public static final String RECON_OM_INSIGHTS_DEFAULT_SEARCH_LIMIT = "1000"; + public static final String RECON_OM_INSIGHTS_DEFAULT_SEARCH_PREV_KEY = ""; public static final String RECON_QUERY_FILTER = "missingIn"; public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0"; - public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = - "0"; + public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = "0"; // Only include containers that are missing in OM by default public static final String DEFAULT_FILTER_FOR_MISSING_CONTAINERS = "SCM"; public static final String RECON_QUERY_LIMIT = "limit"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java index c9875cb826b..a98603a7e9c 100644 --- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java @@ -128,7 +128,10 @@ public enum ErrorCode { Arrays.asList("Overview (OM Data)", "OM DB Insights")), GET_SCM_DB_SNAPSHOT_FAILED( "SCM DB Snapshot sync failed !!!", - Arrays.asList("Containers", "Pipelines")); + Arrays.asList("Containers", "Pipelines")), + UPGRADE_FAILURE( + "Schema upgrade failed. Recon encountered an issue while finalizing the layout upgrade.", + Arrays.asList("Recon startup", "Metadata Layout Version")); private final String message; private final List impacts; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java index 41235ae5428..dc53f195f67 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java @@ -44,7 +44,7 @@ public static Response noMatchedKeysResponse(String startPrefix) { String jsonResponse = String.format( "{\"message\": \"No keys matched the search prefix: '%s'.\"}", startPrefix); - return Response.status(Response.Status.NOT_FOUND) + return Response.status(Response.Status.NO_CONTENT) .entity(jsonResponse) .type(MediaType.APPLICATION_JSON) .build(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaManager.java index 253e37d75ab..66f1212d3f8 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaManager.java @@ -22,7 +22,9 @@ import java.util.HashSet; import java.util.Set; +import org.apache.hadoop.ozone.recon.upgrade.ReconLayoutFeature; import org.hadoop.ozone.recon.schema.ReconSchemaDefinition; +import org.hadoop.ozone.recon.schema.SchemaVersionTableDefinition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,13 +47,46 @@ public ReconSchemaManager(Set reconSchemaDefinitions) { @VisibleForTesting public void createReconSchema() { - reconSchemaDefinitions.forEach(reconSchemaDefinition -> { - try { - reconSchemaDefinition.initializeSchema(); - } catch (SQLException e) { - LOG.error("Error creating Recon schema {}.", - reconSchemaDefinition.getClass().getSimpleName(), e); - } - }); + // Calculate the latest SLV from ReconLayoutFeature + int latestSLV = calculateLatestSLV(); + + try { + // Initialize the schema version table first + reconSchemaDefinitions.stream() + .filter(SchemaVersionTableDefinition.class::isInstance) + .findFirst() + .ifPresent(schemaDefinition -> { + SchemaVersionTableDefinition schemaVersionTable = (SchemaVersionTableDefinition) schemaDefinition; + schemaVersionTable.setLatestSLV(latestSLV); + try { + schemaVersionTable.initializeSchema(); + } catch (SQLException e) { + LOG.error("Error initializing SchemaVersionTableDefinition.", e); + } + }); + + // Initialize all other tables + reconSchemaDefinitions.stream() + .filter(definition -> !(definition instanceof SchemaVersionTableDefinition)) + .forEach(definition -> { + try { + definition.initializeSchema(); + } catch (SQLException e) { + LOG.error("Error initializing schema: {}.", definition.getClass().getSimpleName(), e); + } + }); + + } catch (Exception e) { + LOG.error("Error creating Recon 
schema.", e); + } + } + + /** + * Calculate the latest SLV by iterating over ReconLayoutFeature. + * + * @return The latest SLV. + */ + private int calculateLatestSLV() { + return ReconLayoutFeature.determineSLV(); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaVersionTableManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaVersionTableManager.java new file mode 100644 index 00000000000..e01d52b89cd --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaVersionTableManager.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon; + +import com.google.inject.Inject; +import org.jooq.DSLContext; +import org.jooq.impl.DSL; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; + +import static org.jooq.impl.DSL.name; + +/** + * Manager for handling the Recon Schema Version table. + * This class provides methods to get and update the current schema version. + */ +public class ReconSchemaVersionTableManager { + + private static final Logger LOG = LoggerFactory.getLogger(ReconSchemaVersionTableManager.class); + public static final String RECON_SCHEMA_VERSION_TABLE_NAME = "RECON_SCHEMA_VERSION"; + private DSLContext dslContext; + private final DataSource dataSource; + + @Inject + public ReconSchemaVersionTableManager(DataSource dataSource) throws SQLException { + this.dataSource = dataSource; + this.dslContext = DSL.using(dataSource.getConnection()); + } + + /** + * Get the current schema version from the RECON_SCHEMA_VERSION table. + * If the table is empty, or if it does not exist, it will return 0. + * @return The current schema version. + */ + public int getCurrentSchemaVersion() throws SQLException { + try { + return dslContext.select(DSL.field(name("version_number"))) + .from(DSL.table(RECON_SCHEMA_VERSION_TABLE_NAME)) + .fetchOptional() + .map(record -> record.get( + DSL.field(name("version_number"), Integer.class))) + .orElse(-1); // Return -1 if no version is found + } catch (Exception e) { + LOG.error("Failed to fetch the current schema version.", e); + throw new SQLException("Unable to read schema version from the table.", e); + } + } + + /** + * Update the schema version in the RECON_SCHEMA_VERSION table after all tables are upgraded. + * + * @param newVersion The new version to set. 
+ */ + public void updateSchemaVersion(int newVersion, Connection conn) { + dslContext = DSL.using(conn); + boolean recordExists = dslContext.fetchExists(dslContext.selectOne() + .from(DSL.table(RECON_SCHEMA_VERSION_TABLE_NAME))); + + if (recordExists) { + // Update the existing schema version record + dslContext.update(DSL.table(RECON_SCHEMA_VERSION_TABLE_NAME)) + .set(DSL.field(name("version_number")), newVersion) + .set(DSL.field(name("applied_on")), DSL.currentTimestamp()) + .execute(); + LOG.info("Updated schema version to '{}'.", newVersion); + } else { + // Insert a new schema version record + dslContext.insertInto(DSL.table(RECON_SCHEMA_VERSION_TABLE_NAME)) + .columns(DSL.field(name("version_number")), + DSL.field(name("applied_on"))) + .values(newVersion, DSL.currentTimestamp()) + .execute(); + LOG.info("Inserted new schema version '{}'.", newVersion); + } + } + + /** + * Provides the data source used by this manager. + * @return The DataSource instance. + */ + public DataSource getDataSource() { + return dataSource; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java index 3295eb4524c..24b5c10952a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java @@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider; +import org.apache.hadoop.ozone.recon.upgrade.ReconLayoutVersionManager; import org.apache.hadoop.ozone.util.OzoneNetUtils; import org.apache.hadoop.ozone.util.OzoneVersionInfo; import org.apache.hadoop.ozone.util.ShutdownHookManager; @@ -105,6 +106,7 @@ public Void call() throws Exception { ReconServer.class, originalArgs, LOG, configuration); ConfigurationProvider.setConfiguration(configuration); + injector = Guice.createInjector(new ReconControllerModule(), new ReconRestServletModule(configuration), new ReconSchemaGenerationModule()); @@ -136,8 +138,11 @@ public Void call() throws Exception { this.reconNamespaceSummaryManager = injector.getInstance(ReconNamespaceSummaryManager.class); + ReconContext reconContext = injector.getInstance(ReconContext.class); + ReconSchemaManager reconSchemaManager = injector.getInstance(ReconSchemaManager.class); + LOG.info("Creating Recon Schema."); reconSchemaManager.createReconSchema(); LOG.debug("Recon schema creation done."); @@ -153,6 +158,17 @@ public Void call() throws Exception { this.reconTaskStatusMetrics = injector.getInstance(ReconTaskStatusMetrics.class); + // Handle Recon Schema Versioning + ReconSchemaVersionTableManager versionTableManager = + injector.getInstance(ReconSchemaVersionTableManager.class); + + ReconLayoutVersionManager layoutVersionManager = + new ReconLayoutVersionManager(versionTableManager, reconContext); + // Run the upgrade framework to finalize layout features if needed + ReconStorageContainerManagerFacade reconStorageContainerManagerFacade = + (ReconStorageContainerManagerFacade) this.getReconStorageContainerManager(); + layoutVersionManager.finalizeLayoutFeatures(reconStorageContainerManagerFacade); + LOG.info("Initializing support of Recon Features..."); FeatureProvider.initFeatureSupport(configuration); diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 5c9f6a5f4e1..12139e17723 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -32,11 +32,14 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.time.Instant; -import java.util.List; -import java.util.TimeZone; +import java.util.ArrayList; +import java.util.Collections; import java.util.Date; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.Set; -import java.util.ArrayList; +import java.util.TimeZone; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -52,8 +55,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmUtils; import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.IOUtils; @@ -73,8 +79,13 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; +import org.apache.hadoop.ozone.recon.api.ServiceNotReadyException; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; @@ -286,11 +297,64 @@ public void untarCheckpointFile(File tarFile, Path destPath) */ public static String constructFullPath(OmKeyInfo omKeyInfo, ReconNamespaceSummaryManager reconNamespaceSummaryManager, - ReconOMMetadataManager omMetadataManager) - throws IOException { + ReconOMMetadataManager omMetadataManager) throws IOException { + return constructFullPath(omKeyInfo.getKeyName(), omKeyInfo.getParentObjectID(), omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), reconNamespaceSummaryManager, omMetadataManager); + } - StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); - long parentId = omKeyInfo.getParentObjectID(); + /** + * Constructs the full path of a key from its key name and parent ID using a bottom-up approach, starting from the + * leaf node. + * + * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched + * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from + * bottom to top, finally prepending the volume and bucket names to complete the full path. If the directory structure + * is currently being rebuilt (indicated by the rebuildTriggered flag), this method returns an empty string to signify + * that path construction is temporarily unavailable. 
+ * + * @param keyName The name of the key + * @param initialParentId The parent ID of the key + * @param volumeName The name of the volume + * @param bucketName The name of the bucket + * @return The constructed full path of the key as a String, or an empty string if a rebuild is in progress and + * the path cannot be constructed at this time. + * @throws IOException + */ + public static String constructFullPath(String keyName, long initialParentId, String volumeName, String bucketName, + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager) throws IOException { + StringBuilder fullPath = constructFullPathPrefix(initialParentId, volumeName, bucketName, + reconNamespaceSummaryManager, omMetadataManager); + if (fullPath.length() == 0) { + return ""; + } + fullPath.append(keyName); + return fullPath.toString(); + } + + + /** + * Constructs the prefix path to a key from its key name and parent ID using a bottom-up approach, starting from the + * leaf node. + * + * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched + * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from + * bottom to top, finally prepending the volume and bucket names to complete the full path. If the directory structure + * is currently being rebuilt (indicated by the rebuildTriggered flag), this method returns an empty string to signify + * that path construction is temporarily unavailable. + * + * @param initialParentId The parent ID of the key + * @param volumeName The name of the volume + * @param bucketName The name of the bucket + * @return A StringBuilder containing the constructed prefix path of the key, or an empty string builder if a rebuild + * is in progress. + * @throws IOException + */ + public static StringBuilder constructFullPathPrefix(long initialParentId, String volumeName, + String bucketName, ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager) throws IOException { + StringBuilder fullPath = new StringBuilder(); + long parentId = initialParentId; boolean isDirectoryPresent = false; while (parentId != 0) { @@ -298,16 +362,19 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, if (nsSummary == null) { log.warn("NSSummary tree is currently being rebuilt or the directory could be in the progress of " + "deletion, returning empty string for path construction."); - return ""; + throw new ServiceNotReadyException("Service is initializing. Please try again later."); } if (nsSummary.getParentId() == -1) { if (rebuildTriggered.compareAndSet(false, true)) { triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); } log.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); - return ""; + throw new ServiceNotReadyException("Service is initializing. Please try again later."); + } + // On the last pass, dir-name will be empty and parent will be zero, indicating the loop should end. 
+      if (!nsSummary.getDirName().isEmpty()) {
+        fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX);
+      }
-      fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX);

       // Move to the parent ID of the current directory
       parentId = nsSummary.getParentId();
@@ -315,13 +382,113 @@ public static String constructFullPath(OmKeyInfo omKeyInfo,
     }

     // Prepend the volume and bucket to the constructed path
-    String volumeName = omKeyInfo.getVolumeName();
-    String bucketName = omKeyInfo.getBucketName();
     fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX);
+    // TODO - why is this needed? It seems like it should handle double slashes in the path name,
+    // but it's not clear how they get there. This normalize call is quite expensive as it
+    // creates several objects (URI, PATH, back to string). There was a bug fixed above
+    // where the last parent dirName was empty, which always caused a double // after the
+    // bucket name, but with that fixed, it seems like this should not be needed. All tests
+    // pass without it for key listing.
     if (isDirectoryPresent) {
-      return OmUtils.normalizeKey(fullPath.toString(), true);
+      String path = fullPath.toString();
+      fullPath.setLength(0);
+      fullPath.append(OmUtils.normalizeKey(path, true));
     }
-    return fullPath.toString();
+    return fullPath;
+  }
+
+  /**
+   * Converts a key prefix into an object path for FSO buckets, using IDs.
+   *
+   * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into
+   * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names
+   * with their corresponding IDs. It simplifies database queries for FSO bucket operations.
+   *

    +   * {@code
    +   * Examples:
    +   * - Input: "volume/bucket/key" -> Output: "/volumeID/bucketID/parentDirID/key"
    +   * - Input: "volume/bucket/dir1" -> Output: "/volumeID/bucketID/dir1ID/"
    +   * - Input: "volume/bucket/dir1/key1" -> Output: "/volumeID/bucketID/dir1ID/key1"
    +   * - Input: "volume/bucket/dir1/dir2" -> Output: "/volumeID/bucketID/dir2ID/"
    +   * }
    +   * 
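+   * A usage sketch, for illustration only; the manager arguments are placeholders for
+   * instances available to the caller:
+   * {@code
+   * String objectPath = ReconUtils.convertToObjectPathForOpenKeySearch(
+   *     "volume/bucket/dir1", omMetadataManager, reconNamespaceSummaryManager, reconSCM);
+   * // objectPath is now of the form "/volumeID/bucketID/dir1ID/" as in the examples above
+   * }
+   *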
+   * @param prevKeyPrefix The path to be converted.
+   * @return The object path as "/volumeID/bucketID/ParentId/", or the original prefix if it cannot be converted.
+   * @throws IOException If database access fails.
+   * @throws IllegalArgumentException If the provided path is invalid or cannot be converted.
+   */
+  public static String convertToObjectPathForOpenKeySearch(String prevKeyPrefix,
+      ReconOMMetadataManager omMetadataManager,
+      ReconNamespaceSummaryManager reconNamespaceSummaryManager,
+      OzoneStorageContainerManager reconSCM)
+      throws IOException {
+    try {
+      String[] names = EntityHandler.parseRequestPath(EntityHandler.normalizePath(
+          prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED));
+      Table<String, OmKeyInfo> openFileTable = omMetadataManager.getOpenKeyTable(
+          BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+      // Root-Level: Return the original path
+      if (names.length == 0 || names[0].isEmpty()) {
+        return prevKeyPrefix;
+      }
+
+      // Volume-Level: Fetch the volumeID
+      String volumeName = names[0];
+      validateNames(volumeName);
+      String volumeKey = omMetadataManager.getVolumeKey(volumeName);
+      long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID();
+      if (names.length == 1) {
+        return constructObjectPathWithPrefix(volumeId);
+      }
+
+      // Bucket-Level: Fetch the bucketID
+      String bucketName = names[1];
+      validateNames(bucketName);
+      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+      OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().getSkipCache(bucketKey);
+      long bucketId = bucketInfo.getObjectID();
+      if (names.length == 2 || bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) {
+        return constructObjectPathWithPrefix(volumeId, bucketId);
+      }
+
+      // Directory or Key-Level: Check both key and directory
+      BucketHandler handler =
+          BucketHandler.getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo);
+
+      if (names.length >= 3) {
+        String lastEntry = names[names.length - 1];
+
+        // Check if the directory exists
+        OmDirectoryInfo dirInfo = handler.getDirInfo(names);
+        if (dirInfo != null && dirInfo.getName().equals(lastEntry)) {
+          return constructObjectPathWithPrefix(volumeId, bucketId, dirInfo.getObjectID()) + OM_KEY_PREFIX;
+        }
+
+        // Check if the key exists
+        long dirID = handler.getDirObjectId(names, names.length);
+        String keyKey = constructObjectPathWithPrefix(volumeId, bucketId, dirID) +
+            OM_KEY_PREFIX + lastEntry;
+        OmKeyInfo keyInfo = openFileTable.getSkipCache(keyKey);
+        if (keyInfo != null && keyInfo.getFileName().equals(lastEntry)) {
+          return constructObjectPathWithPrefix(volumeId, bucketId,
+              keyInfo.getParentObjectID()) + OM_KEY_PREFIX + lastEntry;
+        }
+
+        return prevKeyPrefix;
+      }
+    } catch (IllegalArgumentException e) {
+      log.error(
+          "IllegalArgumentException encountered while converting key prefix to object path: {}",
+          prevKeyPrefix, e);
+      throw e;
+    } catch (RuntimeException e) {
+      log.error(
+          "RuntimeException encountered while converting key prefix to object path: {}",
+          prevKeyPrefix, e);
+      return prevKeyPrefix;
+    }
+    return prevKeyPrefix;
   }

   private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager,
@@ -352,7 +519,8 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu
    * @param url url to call
    * @param isSpnego is SPNEGO enabled
    * @return HttpURLConnection instance of the HTTP call.
-   * @throws IOException, AuthenticationException While reading the response.
+ * @throws IOException While reading the response, + * @throws AuthenticationException */ public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory, String url, boolean isSpnego) @@ -569,7 +737,6 @@ public static boolean isInitializationComplete(ReconOMMetadataManager omMetadata * @param dateFormat * @param timeZone * @return the epoch milliseconds representation of the date. - * @throws ParseException */ public static long convertToEpochMillis(String dateString, String dateFormat, TimeZone timeZone) { String localDateFormat = dateFormat; @@ -596,6 +763,109 @@ public static long convertToEpochMillis(String dateString, String dateFormat, Ti } } + public static boolean validateStartPrefix(String startPrefix) { + + // Ensure startPrefix starts with '/' for non-empty values + startPrefix = startPrefix.startsWith("/") ? startPrefix : "/" + startPrefix; + + // Split the path to ensure it's at least at the bucket level (volume/bucket). + String[] pathComponents = startPrefix.split("/"); + if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { + return false; // Invalid if not at bucket level or deeper + } + + return true; + } + + /** + * Retrieves keys from the specified table based on pagination and prefix filtering. + * This method handles different scenarios based on the presence of {@code startPrefix} + * and {@code prevKey}, enabling efficient key retrieval from the table. + * + * The method handles the following cases: + * + * 1. {@code prevKey} provided, {@code startPrefix} empty: + * - Seeks to {@code prevKey}, skips it, and returns subsequent records up to the limit. + * + * 2. {@code prevKey} empty, {@code startPrefix} empty: + * - Iterates from the beginning of the table, retrieving all records up to the limit. + * + * 3. {@code startPrefix} provided, {@code prevKey} empty: + * - Seeks to the first key matching {@code startPrefix} and returns all matching keys up to the limit. + * + * 4. {@code startPrefix} provided, {@code prevKey} provided: + * - Seeks to {@code prevKey}, skips it, and returns subsequent keys that match {@code startPrefix}, + * up to the limit. + * + * This method also handles the following {@code limit} scenarios: + * - If {@code limit == 0} or {@code limit < -1}, no records are returned. + * - If {@code limit == -1}, all records are returned. + * - For positive {@code limit}, it retrieves records up to the specified {@code limit}. + * + * @param table The table to retrieve keys from. + * @param startPrefix The search prefix to match keys against. + * @param limit The maximum number of keys to retrieve. + * @param prevKey The key to start after for the next set of records. + * @return A map of keys and their corresponding {@code OmKeyInfo} or {@code RepeatedOmKeyInfo} objects. + * @throws IOException If there are problems accessing the table. + */ + public static Map extractKeysFromTable( + Table table, String startPrefix, int limit, String prevKey) + throws IOException { + + Map matchedKeys = new LinkedHashMap<>(); + + // Null check for the table to prevent NPE during omMetaManager initialization + if (table == null) { + log.error("Table object is null. omMetaManager might still be initializing."); + return Collections.emptyMap(); + } + + // If limit = 0, return an empty result set + if (limit == 0 || limit < -1) { + return matchedKeys; + } + + // If limit = -1, set it to Integer.MAX_VALUE to return all records + int actualLimit = (limit == -1) ? 
Integer.MAX_VALUE : limit; + + try (TableIterator> keyIter = table.iterator()) { + + // Scenario 1 & 4: prevKey is provided (whether startPrefix is empty or not) + if (!prevKey.isEmpty()) { + keyIter.seek(prevKey); + if (keyIter.hasNext()) { + keyIter.next(); // Skip the previous key record + } + } else if (!startPrefix.isEmpty()) { + // Scenario 3: startPrefix is provided but prevKey is empty, so seek to startPrefix + keyIter.seek(startPrefix); + } + + // Scenario 2: Both startPrefix and prevKey are empty (iterate from the start of the table) + // No seeking needed; just start iterating from the first record in the table + // This is implicit in the following loop, as the iterator will start from the beginning + + // Iterate through the keys while adhering to the limit (if the limit is not zero) + while (keyIter.hasNext() && matchedKeys.size() < actualLimit) { + Table.KeyValue entry = keyIter.next(); + String dbKey = entry.getKey(); + + // Scenario 3 & 4: If startPrefix is provided, ensure the key matches startPrefix + if (!startPrefix.isEmpty() && !dbKey.startsWith(startPrefix)) { + break; // If the key no longer matches the prefix, exit the loop + } + + // Add the valid key-value pair to the results + matchedKeys.put(dbKey, entry.getValue()); + } + } catch (IOException exception) { + log.error("Error retrieving keys from table for path: {}", startPrefix, exception); + throw exception; + } + return matchedKeys; + } + /** * Finds all subdirectories under a parent directory in an FSO bucket. It builds * a list of paths for these subdirectories. These sub-directories are then used diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java index b0a9681c5b8..472cdb62a66 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java @@ -65,13 +65,14 @@ public AccessHeatMapEndpoint(HeatMapServiceImpl heatMapService) { * with volume, buckets under that volume, * then directories, subdirectories and paths * under that bucket. - * E.g. -------->> + *
    +   * E.g. -------->>
        * vol1                           vol2
        * - bucket1                      - bucket2
        * - dir1/dir2/key1               - dir4/dir1/key1
        * - dir1/dir2/key2               - dir4/dir5/key2
        * - dir1/dir3/key1               - dir5/dir3/key1
    -   *
    +   * 
    * @return {@link Response} */ @GET diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 86ef6c022d5..33fc4fd96de 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -157,15 +157,15 @@ public ContainerEndpoint(OzoneStorageContainerManager reconSCM, } /** - * Return @{@link org.apache.hadoop.hdds.scm.container} + * Return {@code org.apache.hadoop.hdds.scm.container} * for the containers starting from the given "prev-key" query param for the * given "limit". The given "prev-key" is skipped from the results returned. * * @param prevKey the containerID after which results are returned. - * start containerID, >=0, + * start containerID, >=0, * start searching at the head if 0. * @param limit max no. of containers to get. - * count must be >= 0 + * count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @return {@link Response} @@ -408,13 +408,18 @@ public Response getUnhealthyContainers( summary = containerHealthSchemaManager.getUnhealthyContainersSummary(); List containers = containerHealthSchemaManager .getUnhealthyContainers(internalState, offset, limit); - List emptyMissingFiltered = containers.stream() - .filter( - container -> !container.getContainerState() - .equals(UnHealthyContainerStates.EMPTY_MISSING.toString())) - .collect( - Collectors.toList()); - for (UnhealthyContainers c : emptyMissingFiltered) { + + // Filtering out EMPTY_MISSING and NEGATIVE_SIZE containers from the response. + // These container states are not being inserted into the database as they represent + // edge cases that are not critical to track as unhealthy containers. 
+ List filteredContainers = containers.stream() + .filter(container -> !container.getContainerState() + .equals(UnHealthyContainerStates.EMPTY_MISSING.toString()) + && !container.getContainerState() + .equals(UnHealthyContainerStates.NEGATIVE_SIZE.toString())) + .collect(Collectors.toList()); + + for (UnhealthyContainers c : filteredContainers) { long containerID = c.getContainerId(); ContainerInfo containerInfo = containerManager.getContainer(ContainerID.valueOf(containerID)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 7f0efe97dd9..543b8e388a9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -152,29 +152,28 @@ public Response getDatanodes() { } }); try { - builder.withContainers(nodeManager.getContainerCount(datanode)); - builder.withOpenContainers(openContainers.get()); + builder.setContainers(nodeManager.getContainerCount(datanode)); + builder.setOpenContainers(openContainers.get()); } catch (NodeNotFoundException ex) { LOG.warn("Cannot get containers, datanode {} not found.", datanode.getUuid(), ex); } DatanodeInfo dnInfo = (DatanodeInfo) datanode; - datanodes.add(builder.withHostname(nodeManager.getHostName(datanode)) - .withDatanodeStorageReport(storageReport) - .withLastHeartbeat(nodeManager.getLastHeartbeat(datanode)) - .withState(nodeState) - .withOperationalState(nodeOpState) - .withPipelines(pipelines) - .withLeaderCount(leaderCount.get()) - .withUUid(datanode.getUuidString()) - .withVersion(nodeManager.getVersion(datanode)) - .withSetupTime(nodeManager.getSetupTime(datanode)) - .withRevision(nodeManager.getRevision(datanode)) - .withBuildDate(nodeManager.getBuildDate(datanode)) - .withLayoutVersion( + datanodes.add(builder.setHostname(nodeManager.getHostName(datanode)) + .setDatanodeStorageReport(storageReport) + .setLastHeartbeat(nodeManager.getLastHeartbeat(datanode)) + .setState(nodeState) + .setOperationalState(nodeOpState) + .setPipelines(pipelines) + .setLeaderCount(leaderCount.get()) + .setUuid(datanode.getUuidString()) + .setVersion(nodeManager.getVersion(datanode)) + .setSetupTime(nodeManager.getSetupTime(datanode)) + .setRevision(nodeManager.getRevision(datanode)) + .setLayoutVersion( dnInfo.getLastKnownLayoutVersion().getMetadataLayoutVersion()) - .withNetworkLocation(datanode.getNetworkLocation()) + .setNetworkLocation(datanode.getNetworkLocation()) .build()); }); @@ -221,26 +220,26 @@ public Response removeDatanodes(List uuids) { try { if (preChecksSuccess(nodeByUuid, failedNodeErrorResponseMap)) { removedDatanodes.add(DatanodeMetadata.newBuilder() - .withHostname(nodeManager.getHostName(nodeByUuid)) - .withUUid(uuid) - .withState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) + .setHostname(nodeManager.getHostName(nodeByUuid)) + .setUuid(uuid) + .setState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) .build()); nodeManager.removeNode(nodeByUuid); LOG.info("Node {} removed successfully !!!", uuid); } else { failedDatanodes.add(DatanodeMetadata.newBuilder() - .withHostname(nodeManager.getHostName(nodeByUuid)) - .withUUid(uuid) - .withOperationalState(nodeByUuid.getPersistedOpState()) - .withState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) + .setHostname(nodeManager.getHostName(nodeByUuid)) + .setUuid(uuid) + .setOperationalState(nodeByUuid.getPersistedOpState()) + 
.setState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) .build()); } } catch (NodeNotFoundException nnfe) { LOG.error("Selected node {} not found : {} ", uuid, nnfe); notFoundDatanodes.add(DatanodeMetadata.newBuilder() - .withHostname("") - .withState(NodeState.DEAD) - .withUUid(uuid).build()); + .setHostname("") + .setState(NodeState.DEAD) + .setUuid(uuid).build()); } } } catch (Exception exp) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 3f95c04fc91..64da15db413 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.ListKeysResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; @@ -55,30 +56,31 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import java.io.IOException; +import java.util.Collections; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.TimeZone; -import java.util.function.Predicate; -import java.util.stream.Collectors; -import java.util.stream.Stream; +import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; -import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_NON_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_NON_FSO; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_START_PREFIX; +import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createBadRequestResponse; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createInternalServerErrorResponse; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.noMatchedKeysResponse; import static 
org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; @@ -176,101 +178,133 @@ public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM, @Path("/open") public Response getOpenKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, + int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKey, - @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_FSO) - @QueryParam(RECON_OPEN_KEY_INCLUDE_FSO) - boolean includeFso, - @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) - @QueryParam(RECON_OPEN_KEY_INCLUDE_NON_FSO) - boolean includeNonFso) { + String prevKey, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_START_PREFIX) + String startPrefix, + @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_FSO) + boolean includeFso, + @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_NON_FSO) + boolean includeNonFso) { + KeyInsightInfoResponse openKeyInsightInfo = new KeyInsightInfoResponse(); - List nonFSOKeyInfoList = - openKeyInsightInfo.getNonFSOKeyInfoList(); - - boolean skipPrevKeyDone = false; - boolean isLegacyBucketLayout = true; - boolean recordsFetchedLimitReached = false; - - String lastKey = ""; - List fsoKeyInfoList = openKeyInsightInfo.getFsoKeyInfoList(); - for (BucketLayout layout : Arrays.asList( - BucketLayout.LEGACY, BucketLayout.FILE_SYSTEM_OPTIMIZED)) { - isLegacyBucketLayout = (layout == BucketLayout.LEGACY); - // Skip bucket iteration based on parameters includeFso and includeNonFso - if ((!includeFso && !isLegacyBucketLayout) || - (!includeNonFso && isLegacyBucketLayout)) { - continue; + + try { + long replicatedTotal = 0; + long unreplicatedTotal = 0; + boolean skipPrevKeyDone = false; // Tracks if prevKey was used earlier + boolean keysFound = false; // Flag to track if any keys are found + String lastKey = null; + Map obsKeys = Collections.emptyMap(); + Map fsoKeys = Collections.emptyMap(); + + // Validate startPrefix if it's provided + if (isNotBlank(startPrefix) && !validateStartPrefix(startPrefix)) { + return createBadRequestResponse("Invalid startPrefix: Path must be at the bucket level or deeper."); } - Table openKeyTable = - omMetadataManager.getOpenKeyTable(layout); - try ( - TableIterator> - keyIter = openKeyTable.iterator()) { - boolean skipPrevKey = false; - String seekKey = prevKey; - if (!skipPrevKeyDone && StringUtils.isNotBlank(prevKey)) { - skipPrevKey = true; - Table.KeyValue seekKeyValue = - keyIter.seek(seekKey); - // check if RocksDB was able to seek correctly to the given key prefix - // if not, then return empty result - // In case of an empty prevKeyPrefix, all the keys are returned - if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && - !seekKeyValue.getKey().equals(prevKey))) { - continue; - } + // Use searchOpenKeys logic with adjustments for FSO and Non-FSO filtering + if (includeNonFso) { + // Search for non-FSO keys in KeyTable + Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); + obsKeys = ReconUtils.extractKeysFromTable(openKeyTable, startPrefix, limit, prevKey); + for (Map.Entry entry : obsKeys.entrySet()) { + keysFound = true; + skipPrevKeyDone = true; // Don't use the prevKey for the file table + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), 
entry.getValue()); + openKeyInsightInfo.getNonFSOKeyInfoList().add(keyEntityInfo); // Add to non-FSO list + replicatedTotal += entry.getValue().getReplicatedSize(); + unreplicatedTotal += entry.getValue().getDataSize(); + lastKey = entry.getKey(); // Update lastKey } - while (keyIter.hasNext()) { - Table.KeyValue kv = keyIter.next(); - String key = kv.getKey(); - lastKey = key; - OmKeyInfo omKeyInfo = kv.getValue(); - // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKey)) { - skipPrevKeyDone = true; - continue; - } - KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); - keyEntityInfo.setKey(key); - keyEntityInfo.setPath(omKeyInfo.getKeyName()); - keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); - keyEntityInfo.setSize(omKeyInfo.getDataSize()); - keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize()); - keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig()); - openKeyInsightInfo.setUnreplicatedDataSize( - openKeyInsightInfo.getUnreplicatedDataSize() + - keyEntityInfo.getSize()); - openKeyInsightInfo.setReplicatedDataSize( - openKeyInsightInfo.getReplicatedDataSize() + - keyEntityInfo.getReplicatedSize()); - boolean added = - isLegacyBucketLayout ? nonFSOKeyInfoList.add(keyEntityInfo) : - fsoKeyInfoList.add(keyEntityInfo); - if ((nonFSOKeyInfoList.size() + fsoKeyInfoList.size()) == limit) { - recordsFetchedLimitReached = true; - break; - } + } + + if (includeFso) { + // Search for FSO keys in FileTable + // If prevKey was used for non-FSO keys, skip it for FSO keys. + String effectivePrevKey = skipPrevKeyDone ? "" : prevKey; + // If limit = -1 then we need to fetch all keys without limit + int effectiveLimit = limit == -1 ? limit : limit - obsKeys.size(); + fsoKeys = searchOpenKeysInFSO(startPrefix, effectiveLimit, effectivePrevKey); + for (Map.Entry entry : fsoKeys.entrySet()) { + keysFound = true; + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); + openKeyInsightInfo.getFsoKeyInfoList().add(keyEntityInfo); // Add to FSO list + replicatedTotal += entry.getValue().getReplicatedSize(); + unreplicatedTotal += entry.getValue().getDataSize(); + lastKey = entry.getKey(); // Update lastKey } - } catch (IOException ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } catch (IllegalArgumentException e) { - throw new WebApplicationException(e, Response.Status.BAD_REQUEST); - } catch (Exception ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); } - if (recordsFetchedLimitReached) { - break; + + // If no keys were found, return a response indicating that no keys matched + if (!keysFound) { + return noMatchedKeysResponse(startPrefix); } + + // Set the aggregated totals in the response + openKeyInsightInfo.setReplicatedDataSize(replicatedTotal); + openKeyInsightInfo.setUnreplicatedDataSize(unreplicatedTotal); + openKeyInsightInfo.setLastKey(lastKey); + + // Return the response with the matched keys and their data sizes + return Response.ok(openKeyInsightInfo).build(); + } catch (IOException e) { + // Handle IO exceptions and return an internal server error response + return createInternalServerErrorResponse("Error searching open keys in OM DB: " + e.getMessage()); + } catch (IllegalArgumentException e) { + // Handle illegal argument exceptions and return a bad request response + return createBadRequestResponse("Invalid argument: " + e.getMessage()); } + } - openKeyInsightInfo.setLastKey(lastKey); - return 
Response.ok(openKeyInsightInfo).build(); + public Map searchOpenKeysInFSO(String startPrefix, + int limit, String prevKey) + throws IOException, IllegalArgumentException { + Map matchedKeys = new LinkedHashMap<>(); + // Convert the search prefix to an object path for FSO buckets + String startPrefixObjectPath = ReconUtils.convertToObjectPathForOpenKeySearch( + startPrefix, omMetadataManager, reconNamespaceSummaryManager, reconSCM); + String[] names = parseRequestPath(startPrefixObjectPath); + Table openFileTable = + omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // If names.length <= 2, then the search prefix is at the volume or bucket level hence + // no need to find parent or extract id's or find subpaths as the openFileTable is + // suitable for volume and bucket level search + if (names.length > 2 && startPrefixObjectPath.endsWith(OM_KEY_PREFIX)) { + // Fetch the parent ID to search for + long parentId = Long.parseLong(names[names.length - 1]); + + // Fetch the nameSpaceSummary for the parent ID + NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); + if (parentSummary == null) { + return matchedKeys; + } + List subPaths = new ArrayList<>(); + // Add the initial search prefix object path because it can have both openFiles + // and subdirectories with openFiles + subPaths.add(startPrefixObjectPath); + + // Recursively gather all subpaths + ReconUtils.gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), Long.parseLong(names[1]), + reconNamespaceSummaryManager); + + // Iterate over the subpaths and retrieve the open files + for (String subPath : subPaths) { + matchedKeys.putAll( + ReconUtils.extractKeysFromTable(openFileTable, subPath, limit - matchedKeys.size(), prevKey)); + if (matchedKeys.size() >= limit) { + break; + } + } + return matchedKeys; + } + + // If the search level is at the volume, bucket or key level, directly search the openFileTable + matchedKeys.putAll( + ReconUtils.extractKeysFromTable(openFileTable, startPrefixObjectPath, limit, prevKey)); + return matchedKeys; } /** @@ -339,62 +373,6 @@ private Long getValueFromId(GlobalStats record) { return record != null ? 
record.getValue() : 0L; } - private void getPendingForDeletionKeyInfo( - int limit, - String prevKey, - KeyInsightInfoResponse deletedKeyAndDirInsightInfo) { - List repeatedOmKeyInfoList = - deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); - Table deletedTable = - omMetadataManager.getDeletedTable(); - try ( - TableIterator> - keyIter = deletedTable.iterator()) { - boolean skipPrevKey = false; - String seekKey = prevKey; - String lastKey = ""; - if (StringUtils.isNotBlank(prevKey)) { - skipPrevKey = true; - Table.KeyValue seekKeyValue = - keyIter.seek(seekKey); - // check if RocksDB was able to seek correctly to the given key prefix - // if not, then return empty result - // In case of an empty prevKeyPrefix, all the keys are returned - if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && - !seekKeyValue.getKey().equals(prevKey))) { - return; - } - } - while (keyIter.hasNext()) { - Table.KeyValue kv = keyIter.next(); - String key = kv.getKey(); - lastKey = key; - RepeatedOmKeyInfo repeatedOmKeyInfo = kv.getValue(); - // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKey)) { - continue; - } - updateReplicatedAndUnReplicatedTotal(deletedKeyAndDirInsightInfo, - repeatedOmKeyInfo); - repeatedOmKeyInfoList.add(repeatedOmKeyInfo); - if ((repeatedOmKeyInfoList.size()) == limit) { - break; - } - } - deletedKeyAndDirInsightInfo.setLastKey(lastKey); - } catch (IOException ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } catch (IllegalArgumentException e) { - throw new WebApplicationException(e, Response.Status.BAD_REQUEST); - } catch (Exception ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } - } - /** Retrieves the summary of deleted keys. * * This method calculates and returns a summary of deleted keys. @@ -428,6 +406,7 @@ public Response getDeletedKeySummary() { * limit - limits the number of key/files returned. * prevKey - E.g. /vol1/bucket1/key1, this will skip keys till it * seeks correctly to the given prevKey. + * startPrefix - E.g. /vol1/bucket1, this will return keys matching this prefix. 
* Sample API Response: * { * "lastKey": "vol1/bucket1/key1", @@ -476,17 +455,90 @@ public Response getDeletedKeySummary() { @GET @Path("/deletePending") public Response getDeletedKeyInfo( - @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, - @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKey) { - KeyInsightInfoResponse - deletedKeyInsightInfo = new KeyInsightInfoResponse(); - getPendingForDeletionKeyInfo(limit, prevKey, - deletedKeyInsightInfo); + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKey, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_START_PREFIX) String startPrefix) { + + // Initialize the response object to hold the key information + KeyInsightInfoResponse deletedKeyInsightInfo = new KeyInsightInfoResponse(); + + boolean keysFound = false; + + try { + // Validate startPrefix if it's provided + if (isNotBlank(startPrefix) && !validateStartPrefix(startPrefix)) { + return createBadRequestResponse("Invalid startPrefix: Path must be at the bucket level or deeper."); + } + + // Perform the search based on the limit, prevKey, and startPrefix + keysFound = getPendingForDeletionKeyInfo(limit, prevKey, startPrefix, deletedKeyInsightInfo); + + } catch (IllegalArgumentException e) { + LOG.error("Invalid startPrefix provided: {}", startPrefix, e); + return createBadRequestResponse("Invalid startPrefix: " + e.getMessage()); + } catch (IOException e) { + LOG.error("I/O error while searching deleted keys in OM DB", e); + return createInternalServerErrorResponse("Error searching deleted keys in OM DB: " + e.getMessage()); + } catch (Exception e) { + LOG.error("Unexpected error occurred while searching deleted keys", e); + return createInternalServerErrorResponse("Unexpected error: " + e.getMessage()); + } + + if (!keysFound) { + return noMatchedKeysResponse(""); + } + return Response.ok(deletedKeyInsightInfo).build(); } + /** + * Retrieves keys pending deletion based on startPrefix, filtering keys matching the prefix. + * + * @param limit The limit of records to return. + * @param prevKey Pagination key. + * @param startPrefix The search prefix. + * @param deletedKeyInsightInfo The response object to populate. 
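+   * @return {@code true} if any matching deleted keys were found, {@code false} otherwise.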
+ */ + private boolean getPendingForDeletionKeyInfo( + int limit, String prevKey, String startPrefix, + KeyInsightInfoResponse deletedKeyInsightInfo) throws IOException { + + long replicatedTotal = 0; + long unreplicatedTotal = 0; + boolean keysFound = false; + String lastKey = null; + + // Search for deleted keys in DeletedTable + Table deletedTable = omMetadataManager.getDeletedTable(); + Map deletedKeys = + ReconUtils.extractKeysFromTable(deletedTable, startPrefix, limit, prevKey); + + // Iterate over the retrieved keys and populate the response + for (Map.Entry entry : deletedKeys.entrySet()) { + keysFound = true; + RepeatedOmKeyInfo repeatedOmKeyInfo = entry.getValue(); + + // We know each RepeatedOmKeyInfo has just one OmKeyInfo object + OmKeyInfo keyInfo = repeatedOmKeyInfo.getOmKeyInfoList().get(0); + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), keyInfo); + + // Add the key directly to the list without classification + deletedKeyInsightInfo.getRepeatedOmKeyInfoList().add(repeatedOmKeyInfo); + + replicatedTotal += keyInfo.getReplicatedSize(); + unreplicatedTotal += keyInfo.getDataSize(); + + lastKey = entry.getKey(); // Update lastKey + } + + // Set the aggregated totals in the response + deletedKeyInsightInfo.setReplicatedDataSize(replicatedTotal); + deletedKeyInsightInfo.setUnreplicatedDataSize(unreplicatedTotal); + deletedKeyInsightInfo.setLastKey(lastKey); + + return keysFound; + } + /** * Creates a keys summary for deleted keys and updates the provided * keysSummary map. Calculates the total number of deleted keys, replicated @@ -526,7 +578,7 @@ private void getPendingForDeletionDirInfo( boolean skipPrevKey = false; String seekKey = prevKey; String lastKey = ""; - if (StringUtils.isNotBlank(prevKey)) { + if (isNotBlank(prevKey)) { skipPrevKey = true; Table.KeyValue seekKeyValue = keyIter.seek(seekKey); @@ -534,7 +586,7 @@ private void getPendingForDeletionDirInfo( // if not, then return empty result // In case of an empty prevKeyPrefix, all the keys are returned if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && + (isNotBlank(prevKey) && !seekKeyValue.getKey().equals(prevKey))) { return; } @@ -549,6 +601,7 @@ private void getPendingForDeletionDirInfo( continue; } KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setIsKey(omKeyInfo.isFile()); keyEntityInfo.setKey(omKeyInfo.getFileName()); keyEntityInfo.setPath(createPath(omKeyInfo)); keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); @@ -734,7 +787,7 @@ public Response getDeletedDirectorySummary() { * /volume1/fso-bucket/dir1/dir2/dir3/file1 * Input Request for OBS bucket: * - * `api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS` + * {@literal `api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS`} * Output Response: * * { @@ -832,7 +885,7 @@ public Response getDeletedDirectorySummary() { * } * Input Request for FSO bucket: * - * `api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS` + * {@literal `api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS`} * Output Response: * * { @@ -930,7 +983,6 @@ public Response getDeletedDirectorySummary() { * } * * ******************************************************** - * @throws IOException */ @GET @Path("/listKeys") @@ -954,20 +1006,20 @@ public Response listKeys(@QueryParam("replicationType") String replicationType, ListKeysResponse listKeysResponse = new ListKeysResponse(); if 
(!ReconUtils.isInitializationComplete(omMetadataManager)) { listKeysResponse.setStatus(ResponseStatus.INITIALIZING); - return Response.ok(listKeysResponse).build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).entity(listKeysResponse).build(); } ParamInfo paramInfo = new ParamInfo(replicationType, creationDate, keySize, startPrefix, prevKey, limit, false, ""); Response response = getListKeysResponse(paramInfo); if ((response.getStatus() != Response.Status.OK.getStatusCode()) && - (response.getStatus() != Response.Status.NOT_FOUND.getStatusCode())) { + (response.getStatus() != Response.Status.NO_CONTENT.getStatusCode())) { return response; } if (response.getEntity() instanceof ListKeysResponse) { listKeysResponse = (ListKeysResponse) response.getEntity(); } - List keyInfoList = listKeysResponse.getKeys(); + List keyInfoList = listKeysResponse.getKeys(); if (!keyInfoList.isEmpty()) { listKeysResponse.setLastKey(keyInfoList.get(keyInfoList.size() - 1).getKey()); } @@ -975,72 +1027,58 @@ public Response listKeys(@QueryParam("replicationType") String replicationType, } private Response getListKeysResponse(ParamInfo paramInfo) { + ListKeysResponse listKeysResponse = new ListKeysResponse(); try { paramInfo.setLimit(Math.max(0, paramInfo.getLimit())); // Ensure limit is non-negative - ListKeysResponse listKeysResponse = new ListKeysResponse(); listKeysResponse.setPath(paramInfo.getStartPrefix()); long replicatedTotal = 0; long unreplicatedTotal = 0; - boolean keysFound = false; // Flag to track if any keys are found // Search keys from non-FSO layout. - Map obsKeys; - Table keyTable = - omMetadataManager.getKeyTable(BucketLayout.LEGACY); - obsKeys = retrieveKeysFromTable(keyTable, paramInfo); - for (Map.Entry entry : obsKeys.entrySet()) { - keysFound = true; - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - - listKeysResponse.getKeys().add(keyEntityInfo); - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); - } + Table keyTable = + omMetadataManager.getKeyTableLite(BucketLayout.LEGACY); + retrieveKeysFromTable(keyTable, paramInfo, listKeysResponse.getKeys()); // Search keys from FSO layout. 
- Map fsoKeys = searchKeysInFSO(paramInfo); - for (Map.Entry entry : fsoKeys.entrySet()) { - keysFound = true; - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - - listKeysResponse.getKeys().add(keyEntityInfo); - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); - } + searchKeysInFSO(paramInfo, listKeysResponse.getKeys()); // If no keys were found, return a response indicating that no keys matched - if (!keysFound) { + if (listKeysResponse.getKeys().isEmpty()) { return ReconResponseUtils.noMatchedKeysResponse(paramInfo.getStartPrefix()); } + for (KeyEntityInfoProtoWrapper keyEntityInfo : listKeysResponse.getKeys()) { + replicatedTotal += keyEntityInfo.getReplicatedSize(); + unreplicatedTotal += keyEntityInfo.getSize(); + } + // Set the aggregated totals in the response listKeysResponse.setReplicatedDataSize(replicatedTotal); listKeysResponse.setUnReplicatedDataSize(unreplicatedTotal); return Response.ok(listKeysResponse).build(); - } catch (IOException e) { - return ReconResponseUtils.createInternalServerErrorResponse( - "Error listing keys from OM DB: " + e.getMessage()); } catch (RuntimeException e) { + if (e instanceof ServiceNotReadyException) { + listKeysResponse.setStatus(ResponseStatus.INITIALIZING); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).entity(listKeysResponse).build(); + } + LOG.error("Error generating listKeys response", e); return ReconResponseUtils.createInternalServerErrorResponse( "Unexpected runtime error while searching keys in OM DB: " + e.getMessage()); } catch (Exception e) { + LOG.error("Error generating listKeys response", e); return ReconResponseUtils.createInternalServerErrorResponse( "Error listing keys from OM DB: " + e.getMessage()); } } - public Map searchKeysInFSO(ParamInfo paramInfo) + public void searchKeysInFSO(ParamInfo paramInfo, List results) throws IOException { - int originalLimit = paramInfo.getLimit(); - Map matchedKeys = new LinkedHashMap<>(); // Convert the search prefix to an object path for FSO buckets String startPrefixObjectPath = convertStartPrefixPathToObjectIdPath(paramInfo.getStartPrefix()); String[] names = parseRequestPath(startPrefixObjectPath); - Table fileTable = - omMetadataManager.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + Table fileTable = + omMetadataManager.getKeyTableLite(BucketLayout.FILE_SYSTEM_OPTIMIZED); // If names.length > 2, then the search prefix is at the level above bucket level hence // no need to find parent or extract id's or find subpaths as the fileTable is @@ -1053,7 +1091,7 @@ public Map searchKeysInFSO(ParamInfo paramInfo) NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); if (parentSummary == null) { - return matchedKeys; + return; } List subPaths = new ArrayList<>(); // Add the initial search prefix object path because it can have both files and subdirectories with files. 
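When the startPrefix falls inside an FSO bucket, searchKeysInFSO first rewrites it into the object-ID form ("/volumeId/bucketId/directoryId/") that the FSO file table is keyed by, and then expands that prefix into one seek prefix per sub-directory using the NSSummary tree before scanning the table. The following is a minimal, self-contained sketch of the sub-path expansion step only; the numeric IDs and the child-directory map are hypothetical stand-ins for the volume/bucket tables and the ReconNamespaceSummaryManager used by the real code.

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;
    import java.util.Map;

    public class FsoSubPathSketch {
      // Expand a starting directory into seek prefixes of the form /volId/bucketId/dirId/.
      static List<String> gatherSubPaths(long volId, long bucketId, long startDirId,
          Map<Long, List<Long>> childDirs) {
        List<String> subPaths = new ArrayList<>();
        Deque<Long> stack = new ArrayDeque<>();
        stack.push(startDirId);
        while (!stack.isEmpty()) {
          long dirId = stack.pop();
          subPaths.add("/" + volId + "/" + bucketId + "/" + dirId + "/");
          for (long child : childDirs.getOrDefault(dirId, List.of())) {
            stack.push(child);
          }
        }
        return subPaths;
      }

      public static void main(String[] args) {
        // dir 301 contains dirs 302 and 303; dir 303 contains dir 304 (IDs are made up).
        Map<Long, List<Long>> childDirs = Map.of(301L, List.of(302L, 303L), 303L, List.of(304L));
        System.out.println(gatherSubPaths(100L, 200L, 301L, childDirs));
        // [/100/200/301/, /100/200/303/, /100/200/304/, /100/200/302/]
      }
    }

Each generated prefix is then used to seek the file table, and the per-sub-path loop stops once results.size() reaches the requested limit, which keeps the scan bounded even for deep directory trees.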
@@ -1065,21 +1103,17 @@ public Map searchKeysInFSO(ParamInfo paramInfo) // Iterate over the subpaths and retrieve the files for (String subPath : subPaths) { paramInfo.setStartPrefix(subPath); - matchedKeys.putAll( - retrieveKeysFromTable(fileTable, paramInfo)); - paramInfo.setLimit(originalLimit - matchedKeys.size()); - if (matchedKeys.size() >= originalLimit) { + retrieveKeysFromTable(fileTable, paramInfo, results); + if (results.size() >= paramInfo.getLimit()) { break; } } - return matchedKeys; + return; } paramInfo.setStartPrefix(startPrefixObjectPath); // Iterate over for bucket and volume level search - matchedKeys.putAll( - retrieveKeysFromTable(fileTable, paramInfo)); - return matchedKeys; + retrieveKeysFromTable(fileTable, paramInfo, results); } @@ -1152,32 +1186,34 @@ public String convertStartPrefixPathToObjectIdPath(String startPrefixPath) * @return A map of keys and their corresponding OmKeyInfo objects. * @throws IOException If there are problems accessing the table. */ - private Map retrieveKeysFromTable( - Table table, ParamInfo paramInfo) + private void retrieveKeysFromTable( + Table table, ParamInfo paramInfo, List results) throws IOException { boolean skipPrevKey = false; String seekKey = paramInfo.getPrevKey(); - Map matchedKeys = new LinkedHashMap<>(); try ( - TableIterator> keyIter = table.iterator()) { + TableIterator> keyIter = table.iterator()) { - if (!paramInfo.isSkipPrevKeyDone() && StringUtils.isNotBlank(seekKey)) { + if (!paramInfo.isSkipPrevKeyDone() && isNotBlank(seekKey)) { skipPrevKey = true; - Table.KeyValue seekKeyValue = + Table.KeyValue seekKeyValue = keyIter.seek(seekKey); // check if RocksDB was able to seek correctly to the given key prefix // if not, then return empty result // In case of an empty prevKeyPrefix, all the keys are returned if (seekKeyValue == null || (!seekKeyValue.getKey().equals(paramInfo.getPrevKey()))) { - return matchedKeys; + return; } } else { keyIter.seek(paramInfo.getStartPrefix()); } + long prevParentID = -1; + StringBuilder keyPrefix = null; + int keyPrefixLength = 0; while (keyIter.hasNext()) { - Table.KeyValue entry = keyIter.next(); + Table.KeyValue entry = keyIter.next(); String dbKey = entry.getKey(); if (!dbKey.startsWith(paramInfo.getStartPrefix())) { break; // Exit the loop if the key no longer matches the prefix @@ -1187,9 +1223,37 @@ private Map retrieveKeysFromTable( continue; } if (applyFilters(entry, paramInfo)) { - matchedKeys.put(dbKey, entry.getValue()); + KeyEntityInfoProtoWrapper keyEntityInfo = entry.getValue(); + keyEntityInfo.setKey(dbKey); + if (keyEntityInfo.getParentId() == 0) { + // Legacy bucket keys have a parentID of zero. OBS bucket keys have a parentID of the bucketID. + // FSO keys have a parent of the immediate parent directory. + // Legacy buckets are obsolete, so this code path is not optimized. We don't expect to see many Legacy + // buckets in practice. + prevParentID = -1; + keyEntityInfo.setPath(ReconUtils.constructFullPath(keyEntityInfo.getKeyName(), keyEntityInfo.getParentId(), + keyEntityInfo.getVolumeName(), keyEntityInfo.getBucketName(), reconNamespaceSummaryManager, + omMetadataManager)); + } else { + // As we iterate keys in sorted order, its highly likely that keys have the same prefix for many keys in a + // row. Especially for FSO buckets, its expensive to construct the path for each key. So, we construct the + // prefix once and reuse it for each identical parent. Only if the parent changes do we need to construct + // a new prefix path. 
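+              // For example (hypothetical IDs): consecutive fileTable entries /100/200/301/file_a and
+              // /100/200/301/file_b share the parent 301, so the "/vol1/bucket1/dir1/" prefix is looked up
+              // once; setLength() below trims the cached StringBuilder back to that prefix and only the new
+              // key name is appended, avoiding another namespace-summary walk for every key.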
+ if (prevParentID != keyEntityInfo.getParentId()) { + prevParentID = keyEntityInfo.getParentId(); + keyPrefix = ReconUtils.constructFullPathPrefix(keyEntityInfo.getParentId(), + keyEntityInfo.getVolumeName(), keyEntityInfo.getBucketName(), reconNamespaceSummaryManager, + omMetadataManager); + keyPrefixLength = keyPrefix.length(); + } + keyPrefix.setLength(keyPrefixLength); + keyPrefix.append(keyEntityInfo.getKeyName()); + keyEntityInfo.setPath(keyPrefix.toString()); + } + + results.add(keyEntityInfo); paramInfo.setLastKey(dbKey); - if (matchedKeys.size() >= paramInfo.getLimit()) { + if (results.size() >= paramInfo.getLimit()) { break; } } @@ -1198,53 +1262,25 @@ private Map retrieveKeysFromTable( LOG.error("Error retrieving keys from table for path: {}", paramInfo.getStartPrefix(), exception); throw exception; } - return matchedKeys; } - private boolean applyFilters(Table.KeyValue entry, ParamInfo paramInfo) throws IOException { + private boolean applyFilters(Table.KeyValue entry, ParamInfo paramInfo) + throws IOException { LOG.debug("Applying filters on : {}", entry.getKey()); - long epochMillis = - ReconUtils.convertToEpochMillis(paramInfo.getCreationDate(), "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); - Predicate> keyAgeFilter = keyData -> { - try { - return keyData.getValue().getCreationTime() >= epochMillis; - } catch (IOException e) { - throw new RuntimeException(e); - } - }; - Predicate> keyReplicationFilter = - keyData -> { - try { - return keyData.getValue().getReplicationConfig().getReplicationType().name() - .equals(paramInfo.getReplicationType()); - } catch (IOException e) { - try { - throw new IOException(e); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - } - }; - Predicate> keySizeFilter = keyData -> { - try { - return keyData.getValue().getDataSize() >= paramInfo.getKeySize(); - } catch (IOException e) { - throw new RuntimeException(e); - } - }; - - List> filteredKeyList = Stream.of(entry) - .filter(keyData -> !StringUtils.isEmpty(paramInfo.getCreationDate()) ? keyAgeFilter.test(keyData) : true) - .filter( - keyData -> !StringUtils.isEmpty(paramInfo.getReplicationType()) ? 
keyReplicationFilter.test(keyData) : true) - .filter(keySizeFilter) - .collect(Collectors.toList()); + if (!StringUtils.isEmpty(paramInfo.getCreationDate()) + && (entry.getValue().getCreationTime() < paramInfo.getCreationDateEpoch())) { + return false; + } - LOG.debug("After applying filter on : {}, filtered list size: {}", entry.getKey(), filteredKeyList.size()); + if (!StringUtils.isEmpty(paramInfo.getReplicationType()) + && !entry.getValue().getReplicationConfig().getReplicationType().name().equals( + paramInfo.getReplicationType())) { + return false; + } - return (filteredKeyList.size() > 0); + return entry.getValue().getSize() >= paramInfo.getKeySize(); } /** @@ -1258,8 +1294,8 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, OmKeyInfo keyInfo) throws IOException { KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); keyEntityInfo.setKey(dbKey); // Set the DB key - keyEntityInfo.setPath(ReconUtils.constructFullPath(keyInfo, reconNamespaceSummaryManager, - omMetadataManager)); + keyEntityInfo.setIsKey(keyInfo.isFile()); + keyEntityInfo.setPath(ReconUtils.constructFullPath(keyInfo, reconNamespaceSummaryManager, omMetadataManager)); keyEntityInfo.setSize(keyInfo.getDataSize()); keyEntityInfo.setCreationTime(keyInfo.getCreationTime()); keyEntityInfo.setModificationTime(keyInfo.getModificationTime()); @@ -1277,17 +1313,18 @@ private void createSummaryForDeletedDirectories( dirSummary.put("totalDeletedDirectories", deletedDirCount); } - private void updateReplicatedAndUnReplicatedTotal( - KeyInsightInfoResponse deletedKeyAndDirInsightInfo, - RepeatedOmKeyInfo repeatedOmKeyInfo) { - repeatedOmKeyInfo.getOmKeyInfoList().forEach(omKeyInfo -> { - deletedKeyAndDirInsightInfo.setUnreplicatedDataSize( - deletedKeyAndDirInsightInfo.getUnreplicatedDataSize() + - omKeyInfo.getDataSize()); - deletedKeyAndDirInsightInfo.setReplicatedDataSize( - deletedKeyAndDirInsightInfo.getReplicatedDataSize() + - omKeyInfo.getReplicatedSize()); - }); + private boolean validateStartPrefix(String startPrefix) { + + // Ensure startPrefix starts with '/' for non-empty values + startPrefix = startPrefix.startsWith("/") ? startPrefix : "/" + startPrefix; + + // Split the path to ensure it's at least at the bucket level (volume/bucket). + String[] pathComponents = startPrefix.split("/"); + if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { + return false; // Invalid if not at bucket level or deeper + } + + return true; } private String createPath(OmKeyInfo omKeyInfo) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java deleted file mode 100644 index 9cd6fa33d03..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ /dev/null @@ -1,389 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.api; - -import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; -import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; -import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; -import org.apache.hadoop.ozone.recon.api.types.NSSummary; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.QueryParam; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import java.io.IOException; -import java.util.Map; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.ArrayList; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_START_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY; -import static org.apache.hadoop.ozone.recon.ReconResponseUtils.noMatchedKeysResponse; -import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createBadRequestResponse; -import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createInternalServerErrorResponse; -import static org.apache.hadoop.ozone.recon.ReconUtils.constructObjectPathWithPrefix; -import static org.apache.hadoop.ozone.recon.ReconUtils.validateNames; -import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; -import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; -import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; - -/** - * REST endpoint for search implementation in OM DB Insight. 
- */ -@Path("/keys") -@Produces(MediaType.APPLICATION_JSON) -@AdminOnly -public class OMDBInsightSearchEndpoint { - - private OzoneStorageContainerManager reconSCM; - private final ReconOMMetadataManager omMetadataManager; - private static final Logger LOG = - LoggerFactory.getLogger(OMDBInsightSearchEndpoint.class); - private ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager; - - - @Inject - public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, - ReconOMMetadataManager omMetadataManager, - ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager) { - this.reconSCM = reconSCM; - this.omMetadataManager = omMetadataManager; - this.reconNamespaceSummaryManager = reconNamespaceSummaryManager; - } - - - /** - * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix. - * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts, - * compiling a list of keys that match the given prefix along with their data sizes. - *
    - * The search prefix must start from the bucket level ('/volumeName/bucketName/') or any specific directory - * or key level (e.g., '/volA/bucketA/dir1' for everything under 'dir1' inside 'bucketA' of 'volA'). - * The search operation matches the prefix against the start of keys' names within the OM DB. - *
    - * Example Usage: - * 1. A startPrefix of "/volA/bucketA/" retrieves every key under bucket 'bucketA' in volume 'volA'. - * 2. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. - * - * @param startPrefix The prefix for searching keys, starting from the bucket level or any specific path. - * @param limit Limits the number of returned keys. - * @param prevKey The key to start after for the next set of records. - * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. - * @throws IOException On failure to access the OM database or process the operation. - * @throws IllegalArgumentException If the provided startPrefix or other arguments are invalid. - */ - @GET - @Path("/open/search") - public Response searchOpenKeys( - @DefaultValue(DEFAULT_START_PREFIX) @QueryParam("startPrefix") - String startPrefix, - @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit") - int limit, - @DefaultValue(RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY) @QueryParam("prevKey") String prevKey) throws IOException { - - try { - // Ensure startPrefix is not null or empty and starts with '/' - if (startPrefix == null || startPrefix.length() == 0) { - return createBadRequestResponse( - "Invalid startPrefix: Path must be at the bucket level or deeper."); - } - startPrefix = startPrefix.startsWith("/") ? startPrefix : "/" + startPrefix; - - // Split the path to ensure it's at least at the bucket level - String[] pathComponents = startPrefix.split("/"); - if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { - return createBadRequestResponse( - "Invalid startPrefix: Path must be at the bucket level or deeper."); - } - - // Ensure the limit is non-negative - limit = Math.max(0, limit); - - // Initialize response object - KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); - long replicatedTotal = 0; - long unreplicatedTotal = 0; - boolean keysFound = false; // Flag to track if any keys are found - String lastKey = null; - - // Search for non-fso keys in KeyTable - Table openKeyTable = - omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); - Map obsKeys = - retrieveKeysFromTable(openKeyTable, startPrefix, limit, prevKey); - for (Map.Entry entry : obsKeys.entrySet()) { - keysFound = true; - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getNonFSOKeyInfoList() - .add(keyEntityInfo); // Add to non-FSO list - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); - lastKey = entry.getKey(); // Update lastKey - } - - // Search for fso keys in FileTable - Map fsoKeys = searchOpenKeysInFSO(startPrefix, limit, prevKey); - for (Map.Entry entry : fsoKeys.entrySet()) { - keysFound = true; - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getFsoKeyInfoList() - .add(keyEntityInfo); // Add to FSO list - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); - lastKey = entry.getKey(); // Update lastKey - } - - // If no keys were found, return a response indicating that no keys matched - if (!keysFound) { - return noMatchedKeysResponse(startPrefix); - } - - // Set the aggregated totals in the response - insightResponse.setReplicatedDataSize(replicatedTotal); - insightResponse.setUnreplicatedDataSize(unreplicatedTotal); - insightResponse.setLastKey(lastKey); - - // Return the 
response with the matched keys and their data sizes - return Response.ok(insightResponse).build(); - } catch (IOException e) { - // Handle IO exceptions and return an internal server error response - return createInternalServerErrorResponse( - "Error searching open keys in OM DB: " + e.getMessage()); - } catch (IllegalArgumentException e) { - // Handle illegal argument exceptions and return a bad request response - return createBadRequestResponse( - "Invalid startPrefix: " + e.getMessage()); - } - } - - public Map searchOpenKeysInFSO(String startPrefix, - int limit, String prevKey) - throws IOException, IllegalArgumentException { - Map matchedKeys = new LinkedHashMap<>(); - // Convert the search prefix to an object path for FSO buckets - String startPrefixObjectPath = convertToObjectPath(startPrefix); - String[] names = parseRequestPath(startPrefixObjectPath); - Table openFileTable = - omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - - // If names.length <= 2, then the search prefix is at the volume or bucket level hence - // no need to find parent or extract id's or find subpaths as the openFileTable is - // suitable for volume and bucket level search - if (names.length > 2 && startPrefixObjectPath.endsWith(OM_KEY_PREFIX)) { - // Fetch the parent ID to search for - long parentId = Long.parseLong(names[names.length - 1]); - - // Fetch the nameSpaceSummary for the parent ID - NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); - if (parentSummary == null) { - return matchedKeys; - } - List subPaths = new ArrayList<>(); - // Add the initial search prefix object path because it can have both openFiles - // and subdirectories with openFiles - subPaths.add(startPrefixObjectPath); - - // Recursively gather all subpaths - ReconUtils.gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), Long.parseLong(names[1]), - reconNamespaceSummaryManager); - - // Iterate over the subpaths and retrieve the open files - for (String subPath : subPaths) { - matchedKeys.putAll(retrieveKeysFromTable(openFileTable, subPath, limit - matchedKeys.size(), prevKey)); - if (matchedKeys.size() >= limit) { - break; - } - } - return matchedKeys; - } - - // If the search level is at the volume, bucket or key level, directly search the openFileTable - matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit, prevKey)); - return matchedKeys; - } - - /** - * Converts a key prefix into an object path for FSO buckets, using IDs. - * - * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into - * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names - * with their corresponding IDs. It simplifies database queries for FSO bucket operations. - * - * Examples: - * - Input: "volume/bucket/key" -> Output: "/volumeID/bucketID/parentDirID/key" - * - Input: "volume/bucket/dir1" -> Output: "/volumeID/bucketID/dir1ID/" - * - Input: "volume/bucket/dir1/key1" -> Output: "/volumeID/bucketID/dir1ID/key1" - * - Input: "volume/bucket/dir1/dir2" -> Output: "/volumeID/bucketID/dir2ID/" - * - * @param prevKeyPrefix The path to be converted. - * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. - * @throws IOException If database access fails. - * @throws IllegalArgumentException If the provided path is invalid or cannot be converted. 
- */ - public String convertToObjectPath(String prevKeyPrefix) throws IOException { - try { - String[] names = parseRequestPath(normalizePath(prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED)); - Table openFileTable = omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - - // Root-Level: Return the original path - if (names.length == 0) { - return prevKeyPrefix; - } - - // Volume-Level: Fetch the volumeID - String volumeName = names[0]; - validateNames(volumeName); - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID(); - if (names.length == 1) { - return constructObjectPathWithPrefix(volumeId); - } - - // Bucket-Level: Fetch the bucketID - String bucketName = names[1]; - validateNames(bucketName); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().getSkipCache(bucketKey); - long bucketId = bucketInfo.getObjectID(); - if (names.length == 2 || bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { - return constructObjectPathWithPrefix(volumeId, bucketId); - } - - // Directory or Key-Level: Check both key and directory - BucketHandler handler = - getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); - - if (names.length >= 3) { - String lastEntiry = names[names.length - 1]; - - // Check if the directory exists - OmDirectoryInfo dirInfo = handler.getDirInfo(names); - if (dirInfo != null && dirInfo.getName().equals(lastEntiry)) { - return constructObjectPathWithPrefix(volumeId, bucketId, dirInfo.getObjectID()) + OM_KEY_PREFIX; - } - - // Check if the key exists - long dirID = handler.getDirObjectId(names, names.length); - String keyKey = constructObjectPathWithPrefix(volumeId, bucketId, dirID) + - OM_KEY_PREFIX + lastEntiry; - OmKeyInfo keyInfo = openFileTable.getSkipCache(keyKey); - if (keyInfo != null && keyInfo.getFileName().equals(lastEntiry)) { - return constructObjectPathWithPrefix(volumeId, bucketId, - keyInfo.getParentObjectID()) + OM_KEY_PREFIX + lastEntiry; - } - - return prevKeyPrefix; - } - } catch (IllegalArgumentException e) { - LOG.error( - "IllegalArgumentException encountered while converting key prefix to object path: {}", - prevKeyPrefix, e); - throw e; - } catch (RuntimeException e) { - LOG.error( - "RuntimeException encountered while converting key prefix to object path: {}", - prevKeyPrefix, e); - return prevKeyPrefix; - } - return prevKeyPrefix; - } - - - /** - * Common method to retrieve keys from a table based on a search prefix and a limit. - * - * @param table The table to retrieve keys from. - * @param startPrefix The search prefix to match keys against. - * @param limit The maximum number of keys to retrieve. - * @param prevKey The key to start after for the next set of records. - * @return A map of keys and their corresponding OmKeyInfo objects. - * @throws IOException If there are problems accessing the table. - */ - private Map retrieveKeysFromTable( - Table table, String startPrefix, int limit, String prevKey) - throws IOException { - Map matchedKeys = new LinkedHashMap<>(); - try (TableIterator> keyIter = table.iterator()) { - // If a previous key is provided, seek to the previous key and skip it. - if (!prevKey.isEmpty()) { - keyIter.seek(prevKey); - if (keyIter.hasNext()) { - // Skip the previous key - keyIter.next(); - } - } else { - // If no previous key is provided, start from the search prefix. 
- keyIter.seek(startPrefix); - } - while (keyIter.hasNext() && matchedKeys.size() < limit) { - Table.KeyValue entry = keyIter.next(); - String dbKey = entry.getKey(); - if (!dbKey.startsWith(startPrefix)) { - break; // Exit the loop if the key no longer matches the prefix - } - matchedKeys.put(dbKey, entry.getValue()); - } - } catch (IOException exception) { - LOG.error("Error retrieving keys from table for path: {}", startPrefix, exception); - throw exception; - } - return matchedKeys; - } - - /** - * Creates a KeyEntityInfo object from an OmKeyInfo object and the corresponding key. - * - * @param dbKey The key in the database corresponding to the OmKeyInfo object. - * @param keyInfo The OmKeyInfo object to create the KeyEntityInfo from. - * @return The KeyEntityInfo object created from the OmKeyInfo object and the key. - */ - private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, - OmKeyInfo keyInfo) { - KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); - keyEntityInfo.setKey(dbKey); // Set the DB key - keyEntityInfo.setPath(keyInfo.getKeyName()); // Assuming path is the same as key name - keyEntityInfo.setInStateSince(keyInfo.getCreationTime()); - keyEntityInfo.setSize(keyInfo.getDataSize()); - keyEntityInfo.setReplicatedSize(keyInfo.getReplicatedSize()); - keyEntityInfo.setReplicationConfig(keyInfo.getReplicationConfig()); - return keyEntityInfo; - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ServiceNotReadyException.java similarity index 73% rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java rename to hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ServiceNotReadyException.java index 00891cf3e24..4190cc279ce 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ServiceNotReadyException.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,17 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.container.replication; -import org.junit.jupiter.api.BeforeEach; +package org.apache.hadoop.ozone.recon.api; /** - * Tests {@link GrpcReplicationService}. + * This exception is thrown when the REST API service is still initializing and not yet ready.
*/ -class TestGrpcReplicationServiceWithZeroCopy - extends TestGrpcReplicationService { - @BeforeEach - public void setUp() throws Exception { - init(true); +public class ServiceNotReadyException extends RuntimeException { + public ServiceNotReadyException(String message) { + super(message); } } + diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java index 070b7e1ccd4..3ce4fc7f837 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TriggerDBSyncEndpoint.java @@ -32,6 +32,7 @@ */ @Path("/triggerdbsync") @Produces(MediaType.APPLICATION_JSON) +@AdminOnly public class TriggerDBSyncEndpoint { private OzoneManagerServiceProvider ozoneManagerServiceProvider; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 266caaa2d8e..a2db616ec2f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -118,7 +118,7 @@ public static String buildSubpath(String path, String nextLevel) { } /** - * Example: /vol1/buck1/a/b/c/d/e/file1.txt -> a/b/c/d/e/file1.txt. + * Example: {@literal /vol1/buck1/a/b/c/d/e/file1.txt -> a/b/c/d/e/file1.txt} . * @param names parsed request * @return key name */ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java index fae47b3b368..51994abfbad 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java @@ -73,7 +73,6 @@ public static Builder newBuilder() { /** * Builder for AclMetadata. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String type; private String name; @@ -84,23 +83,23 @@ public Builder() { } - public Builder withType(String type) { + public Builder setType(String type) { this.type = type; return this; } - public Builder withName(String name) { + public Builder setName(String name) { this.name = name; return this; } - public Builder withScope(String scope) { + public Builder setScope(String scope) { this.scope = scope; return this; } - public Builder withAclList(List aclList) { + public Builder setAclList(List aclList) { this.aclList = aclList; return this; } @@ -127,10 +126,10 @@ public static AclMetadata fromOzoneAcl(OzoneAcl ozoneAcl) { AclMetadata.Builder builder = AclMetadata.newBuilder(); - return builder.withType(ozoneAcl.getType().toString().toUpperCase()) - .withName(ozoneAcl.getName()) - .withScope(ozoneAcl.getAclScope().toString().toUpperCase()) - .withAclList(ozoneAcl.getAclStringList()) + return builder.setType(ozoneAcl.getType().toString().toUpperCase()) + .setName(ozoneAcl.getName()) + .setScope(ozoneAcl.getAclScope().toString().toUpperCase()) + .setAclList(ozoneAcl.getAclStringList()) .build(); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ClusterStateResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ClusterStateResponse.java index 6e595891c4d..424584a2be4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ClusterStateResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ClusterStateResponse.java @@ -138,7 +138,6 @@ private ClusterStateResponse(Builder b) { /** * Builder for ClusterStateResponse. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private int pipelines; private int totalDatanodes; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 06c20a963a2..ef64e921a31 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -84,10 +84,6 @@ public final class DatanodeMetadata { @JsonInclude(JsonInclude.Include.NON_NULL) private String revision; - @XmlElement(name = "buildDate") - @JsonInclude(JsonInclude.Include.NON_NULL) - private String buildDate; - @XmlElement(name = "layoutVersion") @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int layoutVersion; @@ -110,7 +106,6 @@ private DatanodeMetadata(Builder builder) { this.version = builder.version; this.setupTime = builder.setupTime; this.revision = builder.revision; - this.buildDate = builder.buildDate; this.layoutVersion = builder.layoutVersion; this.networkLocation = builder.networkLocation; } @@ -167,10 +162,6 @@ public String getRevision() { return revision; } - public String getBuildDate() { - return buildDate; - } - public int getLayoutVersion() { return layoutVersion; } @@ -191,7 +182,6 @@ public static Builder newBuilder() { /** * Builder for DatanodeMetadata. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String hostname; private String uuid; @@ -206,7 +196,6 @@ public static final class Builder { private String version; private long setupTime; private String revision; - private String buildDate; private int layoutVersion; private String networkLocation; @@ -216,83 +205,78 @@ public Builder() { this.leaderCount = 0; } - public Builder withHostname(String hostname) { + public Builder setHostname(String hostname) { this.hostname = hostname; return this; } - public Builder withState(NodeState state) { + public Builder setState(NodeState state) { this.state = state; return this; } - public Builder withOperationalState(NodeOperationalState opState) { - this.opState = opState; + public Builder setOperationalState(NodeOperationalState operationalState) { + this.opState = operationalState; return this; } - public Builder withLastHeartbeat(long lastHeartbeat) { + public Builder setLastHeartbeat(long lastHeartbeat) { this.lastHeartbeat = lastHeartbeat; return this; } - public Builder withDatanodeStorageReport(DatanodeStorageReport + public Builder setDatanodeStorageReport(DatanodeStorageReport datanodeStorageReport) { this.datanodeStorageReport = datanodeStorageReport; return this; } - public Builder withPipelines(List pipelines) { + public Builder setPipelines(List pipelines) { this.pipelines = pipelines; return this; } - public Builder withContainers(int containers) { + public Builder setContainers(int containers) { this.containers = containers; return this; } - public Builder withOpenContainers(int openContainers) { + public Builder setOpenContainers(int openContainers) { this.openContainers = openContainers; return this; } - public Builder withLeaderCount(int leaderCount) { + public Builder setLeaderCount(int leaderCount) { this.leaderCount = leaderCount; return this; } - public Builder withUUid(String uuid) { + public Builder setUuid(String uuid) { this.uuid = uuid; return this; } - public Builder withVersion(String version) { + public Builder setVersion(String version) { this.version = version; return this; } - public Builder withSetupTime(long setupTime) { + public Builder setSetupTime(long setupTime) { this.setupTime = setupTime; return this; } - public Builder withRevision(String revision) { + public Builder setRevision(String revision) { this.revision = revision; return this; } - public Builder withBuildDate(String buildDate) { - this.buildDate = buildDate; - return this; - } - - public Builder withLayoutVersion(int layoutVersion) { + public Builder setLayoutVersion(int layoutVersion) { this.layoutVersion = layoutVersion; return this; } - public Builder withNetworkLocation(String networkLocation) { + public Builder setNetworkLocation(String networkLocation) { this.networkLocation = networkLocation; return this; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java index d7cd3599190..8a56cbbd33f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java @@ -143,7 +143,7 @@ public boolean isKey() { return isKey; } - public void setKey(boolean key) { - isKey = key; + public void setIsKey(boolean isKey) { + this.isKey = isKey; } } diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java new file mode 100644 index 00000000000..890f8fbd3aa --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.hdds.utils.db.DelegatedCodec; +import org.apache.hadoop.hdds.utils.db.Proto2Codec; +import org.apache.hadoop.ozone.om.helpers.QuotaUtil; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +/** + * POJO object wrapper for metadata of a given key/file. This class wraps a KeyInfo protobuf + * object and delegates most accessors to it. + */ +public final class KeyEntityInfoProtoWrapper { + + public static Codec getCodec() { + return new DelegatedCodec<>( + Proto2Codec.get(OzoneManagerProtocolProtos.KeyInfo.getDefaultInstance()), + KeyEntityInfoProtoWrapper::getFromProtobuf, + KeyEntityInfoProtoWrapper::toProtobuf, + KeyEntityInfoProtoWrapper.class); + } + + private final OzoneManagerProtocolProtos.KeyInfo keyInfoProto; + + /** This is key table key of rocksDB and will help UI to implement pagination + * where UI will use the last record key to send in API as preKeyPrefix. */ + @JsonProperty("key") + private String key; + + /** Path of a key/file. */ + @JsonProperty("path") + private String path; + + @JsonProperty("replicatedSize") + private final long replicatedSize; + + @JsonProperty("replicationInfo") + private final ReplicationConfig replicationConfig; + + private KeyEntityInfoProtoWrapper(OzoneManagerProtocolProtos.KeyInfo proto) { + keyInfoProto = proto; + replicationConfig = ReplicationConfig.fromProto(proto.getType(), proto.getFactor(), + proto.getEcReplicationConfig()); + this.replicatedSize = QuotaUtil.getReplicatedSize(getSize(), getReplicationConfig()); + } + + public static KeyEntityInfoProtoWrapper getFromProtobuf(OzoneManagerProtocolProtos.KeyInfo keyInfo) { + return new KeyEntityInfoProtoWrapper(keyInfo); + } + + public OzoneManagerProtocolProtos.KeyInfo toProtobuf() { + throw new UnsupportedOperationException("This method is not supported."); + } + + @JsonProperty("key") + public String getKey() { + if (key == null) { + throw new IllegalStateException("Key must be set to correctly serialize this object."); + } + return key; + } + + public void setKey(String key) { + this.key = key; + } + + @JsonProperty("path") + public String getPath() { + if (path == null) { + throw new IllegalStateException("Path must be set to correctly serialize this object."); + } + return path; + } + + public void setPath(String path) { + this.path = path; + } + + @JsonProperty("size") + public long getSize() { + return keyInfoProto.getDataSize(); + } + + @JsonProperty("replicatedSize") + public long getReplicatedSize() { + return replicatedSize; + } + + @JsonProperty("replicationInfo") + public ReplicationConfig getReplicationConfig() { + return replicationConfig; + } + + @JsonProperty("creationTime") + public long getCreationTime() { + return keyInfoProto.getCreationTime(); + } + + @JsonProperty("modificationTime") + public long getModificationTime() { + return keyInfoProto.getModificationTime(); + } + + @JsonProperty("isKey") + public boolean getIsKey() { + return keyInfoProto.getIsFile(); + } + + public long 
getParentId() { + return keyInfoProto.getParentID(); + } + + public String getVolumeName() { + return keyInfoProto.getVolumeName(); + } + + public String getBucketName() { + return keyInfoProto.getBucketName(); + } + + /** Returns the key name of the key stored in the OM Key Info object. */ + public String getKeyName() { + return keyInfoProto.getKeyName(); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java index 7220060aeb0..2770e7f7f6f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java @@ -51,7 +51,7 @@ public class ListKeysResponse { /** list of keys. */ @JsonProperty("keys") - private List keys; + private List keys; public ListKeysResponse() { @@ -95,11 +95,11 @@ public void setPath(String path) { this.path = path; } - public List getKeys() { + public List getKeys() { return keys; } - public void setKeys(List keys) { + public void setKeys(List keys) { this.keys = keys; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NamespaceSummaryResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NamespaceSummaryResponse.java index 5ccfd988731..ccbaa35788c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NamespaceSummaryResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NamespaceSummaryResponse.java @@ -49,8 +49,8 @@ public class NamespaceSummaryResponse { * * @return Builder */ - public static NamespaceSummaryResponse.Builder newBuilder() { - return new NamespaceSummaryResponse.Builder(); + public static Builder newBuilder() { + return new Builder(); } public NamespaceSummaryResponse(Builder b) { @@ -104,7 +104,6 @@ public void setObjectDBInfo(ObjectDBInfo objectDBInfo) { /** * Builder for NamespaceSummaryResponse. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String path; private EntityType entityType; @@ -119,30 +118,30 @@ public Builder() { this.entityType = EntityType.ROOT; } - public NamespaceSummaryResponse.Builder setPath(String path) { + public Builder setPath(String path) { this.path = path; return this; } - public NamespaceSummaryResponse.Builder setEntityType( + public Builder setEntityType( EntityType entityType) { this.entityType = entityType; return this; } - public NamespaceSummaryResponse.Builder setCountStats( + public Builder setCountStats( CountStats countStats) { this.countStats = countStats; return this; } - public NamespaceSummaryResponse.Builder setObjectDBInfo( + public Builder setObjectDBInfo( ObjectDBInfo objectDBInfo) { this.objectDBInfo = objectDBInfo; return this; } - public NamespaceSummaryResponse.Builder setStatus( + public Builder setStatus( ResponseStatus status) { this.status = status; return this; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ParamInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ParamInfo.java index 345b0429076..e4bcea47b4d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ParamInfo.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ParamInfo.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.ozone.recon.api.types; +import org.apache.hadoop.ozone.recon.ReconUtils; + +import java.util.TimeZone; + /** * Wrapper object for statistics of records of a page in API response. */ @@ -37,6 +41,8 @@ public class ParamInfo { */ private String creationDate; + private long creationDateEpoch = -1; + /** * */ @@ -87,6 +93,14 @@ public String getCreationDate() { return creationDate; } + public long getCreationDateEpoch() { + if (creationDateEpoch == -1) { + creationDateEpoch = ReconUtils.convertToEpochMillis( + getCreationDate(), "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); + } + return creationDateEpoch; + } + public String getReplicationType() { return replicationType; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/PipelineMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/PipelineMetadata.java index d26f87f6f78..2a2f223bb95 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/PipelineMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/PipelineMetadata.java @@ -133,7 +133,6 @@ private PipelineMetadata(Builder b) { /** * Builder for PipelineMetadata. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private UUID pipelineId; private PipelineState status; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java index ba03ec61f14..eaf08d9ca83 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java @@ -50,12 +50,6 @@ public class UnhealthyContainersResponse { @JsonProperty("misReplicatedCount") private long misReplicatedCount = 0; - /** - * Total count of containers with negative size. 
- */ - @JsonProperty("negativeSizeCount") - private long negativeSizeCount = 0; - /** * A collection of unhealthy containers. */ @@ -83,9 +77,6 @@ public void setSummaryCount(String state, long count) { } else if (state.equals( UnHealthyContainerStates.MIS_REPLICATED.toString())) { this.misReplicatedCount = count; - } else if (state.equals( - UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { - this.negativeSizeCount = count; } } @@ -105,10 +96,6 @@ public long getMisReplicatedCount() { return misReplicatedCount; } - public long getNegativeSizeCount() { - return negativeSizeCount; - } - public Collection getContainers() { return containers; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index f3b273451a2..aa6c5a765d1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -58,6 +58,11 @@ private NSSummaryCodec() { // singleton } + @Override + public Class getTypeClass() { + return NSSummary.class; + } + @Override public byte[] toPersistedFormat(NSSummary object) throws IOException { Set childDirs = object.getChildDir(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 639047d37bd..11af6eaff53 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -29,6 +29,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.PlacementPolicy; @@ -78,6 +79,8 @@ public class ContainerHealthTask extends ReconScmTask { private final ReconContainerMetadataManager reconContainerMetadataManager; private final PlacementPolicy placementPolicy; private final long interval; + private Map> + unhealthyContainerStateStatsMapForTesting; private final Set processedContainers = new HashSet<>(); @@ -185,10 +188,12 @@ private void checkAndProcessContainers( private void logUnhealthyContainerStats( Map> unhealthyContainerStateStatsMap) { + unhealthyContainerStateStatsMapForTesting = + new HashMap<>(unhealthyContainerStateStatsMap); // If any EMPTY_MISSING containers, then it is possible that such // containers got stuck in the closing state which never got // any replicas created on the datanodes. In this case, we log it as - // EMPTY, and insert as EMPTY_MISSING in UNHEALTHY_CONTAINERS table. + // EMPTY_MISSING in unhealthy container statistics but do not add it to the table. unhealthyContainerStateStatsMap.entrySet().forEach(stateEntry -> { UnHealthyContainerStates unhealthyContainerState = stateEntry.getKey(); Map containerStateStatsMap = stateEntry.getValue(); @@ -256,6 +261,11 @@ private void completeProcessingContainer( * completeProcessingContainer is called. This will check to see if any * additional records need to be added to the database. 
* + * If a container is identified as missing, empty-missing, under-replicated, + * over-replicated or mis-replicated, the method checks with SCM to determine + * if it has been deleted, using {@code containerDeletedInSCM}. If the container is + * deleted in SCM, the corresponding record is removed from Recon. + * * @param currentTime Timestamp to place on all records generated by this run * @param unhealthyContainerStateCountMap * @return Count of records processed @@ -273,9 +283,11 @@ private long processExistingDBRecords(long currentTime, recordCount++; UnhealthyContainersRecord rec = cursor.fetchNext(); try { + // Set the current container if it's not already set if (currentContainer == null) { currentContainer = setCurrentContainer(rec.getContainerId()); } + // If the container ID has changed, finish processing the previous one if (currentContainer.getContainerID() != rec.getContainerId()) { completeProcessingContainer( currentContainer, existingRecords, currentTime, @@ -283,24 +295,29 @@ private long processExistingDBRecords(long currentTime, existingRecords.clear(); currentContainer = setCurrentContainer(rec.getContainerId()); } - if (ContainerHealthRecords - .retainOrUpdateRecord(currentContainer, rec - )) { - // Check if the missing container is deleted in SCM - if (currentContainer.isMissing() && - containerDeletedInSCM(currentContainer.getContainer())) { - rec.delete(); - } - existingRecords.add(rec.getContainerState()); - if (rec.changed()) { - rec.update(); - } - } else { + + // Unhealthy Containers such as MISSING, UNDER_REPLICATED, + // OVER_REPLICATED, MIS_REPLICATED can have their unhealthy states changed or retained. + if (!ContainerHealthRecords.retainOrUpdateRecord(currentContainer, rec)) { + rec.delete(); LOG.info("DELETED existing unhealthy container record...for Container: {}", currentContainer.getContainerID()); + } + + // If the container is marked as MISSING and it's deleted in SCM, remove the record + if (currentContainer.isMissing() && containerDeletedInSCM(currentContainer.getContainer())) { rec.delete(); + LOG.info("DELETED existing unhealthy container record...for Container: {}", + currentContainer.getContainerID()); + } + + existingRecords.add(rec.getContainerState()); + // If the record was changed, update it + if (rec.changed()) { + rec.update(); } } catch (ContainerNotFoundException cnf) { + // If the container is not found, delete the record and reset currentContainer rec.delete(); currentContainer = null; } @@ -326,13 +343,6 @@ private void processContainer(ContainerInfo container, long currentTime, containerReplicas, placementPolicy, reconContainerMetadataManager, conf); - // Handle negative sized containers separately - if (h.getContainer().getUsedBytes() < 0) { - handleNegativeSizedContainers(h, currentTime, - unhealthyContainerStateStatsMap); - return; - } - if (h.isHealthilyReplicated() || h.isDeleted()) { return; } @@ -349,6 +359,18 @@ private void processContainer(ContainerInfo container, long currentTime, } } + /** + * Ensures the container's state in Recon is updated to match its state in SCM. + * + * If SCM reports the container as DELETED, this method attempts to transition + * the container's state in Recon from CLOSED to DELETING, or from DELETING to + * DELETED, based on the current state in Recon. It logs each transition attempt + * and handles any exceptions that may occur. + * + * @param containerInfo the container whose state is being checked and potentially updated. 
+ * @return {@code true} if the container was found to be DELETED in SCM and the + * state transition was attempted in Recon; {@code false} otherwise. + */ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { try { ContainerWithPipeline containerWithPipeline = @@ -358,6 +380,8 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { if (containerInfo.getState() == HddsProtos.LifeCycleState.CLOSED) { containerManager.updateContainerState(containerInfo.containerID(), HddsProtos.LifeCycleEvent.DELETE); + LOG.debug("Successfully changed container {} state from CLOSED to DELETING.", + containerInfo.containerID()); } if (containerInfo.getState() == HddsProtos.LifeCycleState.DELETING && containerManager.getContainerReplicas(containerInfo.containerID()) @@ -365,6 +389,7 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { ) { containerManager.updateContainerState(containerInfo.containerID(), HddsProtos.LifeCycleEvent.CLEANUP); + LOG.info("Successfully Deleted container {} from Recon.", containerInfo.containerID()); } return true; } @@ -380,28 +405,50 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { /** * This method is used to handle containers with negative sizes. It logs an - * error message and inserts a record into the UNHEALTHY_CONTAINERS table. + * error message. * @param containerHealthStatus * @param currentTime * @param unhealthyContainerStateStatsMap */ - private void handleNegativeSizedContainers( + private static void handleNegativeSizedContainers( ContainerHealthStatus containerHealthStatus, long currentTime, Map> unhealthyContainerStateStatsMap) { + // NEGATIVE_SIZE containers are also not inserted into the database. + // This condition usually arises due to corrupted or invalid metadata, where + // the container's size is inaccurately recorded as negative. Since this does not + // represent a typical unhealthy scenario and may not have any meaningful + // impact on system health, such containers are logged for investigation but + // excluded from the UNHEALTHY_CONTAINERS table to maintain data integrity. ContainerInfo container = containerHealthStatus.getContainer(); - LOG.error( - "Container {} has negative size. Please visit Recon's unhealthy " + - "container endpoint for more details.", - container.getContainerID()); - UnhealthyContainers record = - ContainerHealthRecords.recordForState(containerHealthStatus, - UnHealthyContainerStates.NEGATIVE_SIZE, currentTime); - List records = Collections.singletonList(record); - populateContainerStats(containerHealthStatus, - UnHealthyContainerStates.NEGATIVE_SIZE, + LOG.error("Container {} has negative size.", container.getContainerID()); + populateContainerStats(containerHealthStatus, UnHealthyContainerStates.NEGATIVE_SIZE, + unhealthyContainerStateStatsMap); + } + + /** + * This method is used to handle containers that are empty and missing. It logs + * a debug message. + * @param containerHealthStatus + * @param currentTime + * @param unhealthyContainerStateStatsMap + */ + private static void handleEmptyMissingContainers( + ContainerHealthStatus containerHealthStatus, long currentTime, + Map> + unhealthyContainerStateStatsMap) { + // EMPTY_MISSING containers are not inserted into the database. + // These containers typically represent those that were never written to + // or remain in an incomplete state. Tracking such containers as unhealthy + // would not provide valuable insights since they don't pose a risk or issue + // to the system. 
Instead, they are logged for awareness, but not stored in + // the UNHEALTHY_CONTAINERS table to avoid unnecessary entries. + ContainerInfo container = containerHealthStatus.getContainer(); + LOG.debug("Empty container {} is missing. It will be logged in the " + + "unhealthy container statistics, but no record will be created in the " + + "UNHEALTHY_CONTAINERS table.", container.getContainerID()); + populateContainerStats(containerHealthStatus, EMPTY_MISSING, unhealthyContainerStateStatsMap); - containerHealthSchemaManager.insertUnhealthyContainerRecords(records); } /** @@ -492,22 +539,21 @@ public static List generateUnhealthyRecords( populateContainerStats(container, UnHealthyContainerStates.MISSING, unhealthyContainerStateStatsMap); } else { - - LOG.debug("Empty container {} is missing. Kindly check the " + - "consolidated container stats per UNHEALTHY state logged as " + - "starting with **Container State Stats:**"); - - records.add( - recordForState(container, EMPTY_MISSING, - time)); - populateContainerStats(container, - EMPTY_MISSING, + handleEmptyMissingContainers(container, time, unhealthyContainerStateStatsMap); } // A container cannot have any other records if it is missing so return return records; } + // For Negative sized containers we only log but not insert into DB + if (container.getContainer().getUsedBytes() < 0 + && !recordForStateExists.contains( + UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { + handleNegativeSizedContainers(container, time, + unhealthyContainerStateStatsMap); + } + if (container.isUnderReplicated() && !recordForStateExists.contains( UnHealthyContainerStates.UNDER_REPLICATED.toString())) { @@ -650,4 +696,23 @@ private static void populateContainerStats( (value + container.getContainer().getUsedBytes())); } } + + /** + * Expose the logger for testing purposes. + * + * @return the logger instance + */ + @VisibleForTesting + public Logger getLogger() { + return LOG; + } + + /** + * Expose the unhealthyContainerStateStatsMap for testing purposes. 
+ */ + @VisibleForTesting + public Map> getUnhealthyContainerStateStatsMap() { + return unhealthyContainerStateStatsMapForTesting; + } + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java index 0c13376fa52..9ccc09d8d03 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.recon.persistence; import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.UNDER_REPLICATED; -import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_BAD; import static org.hadoop.ozone.recon.schema.tables.UnhealthyContainersTable.UNHEALTHY_CONTAINERS; import static org.jooq.impl.DSL.count; @@ -76,7 +76,7 @@ public List getUnhealthyContainers( SelectQuery query = dslContext.selectQuery(); query.addFrom(UNHEALTHY_CONTAINERS); if (state != null) { - if (state.equals(ALL_REPLICAS_UNHEALTHY)) { + if (state.equals(ALL_REPLICAS_BAD)) { query.addConditions(UNHEALTHY_CONTAINERS.CONTAINER_STATE .eq(UNDER_REPLICATED.toString())); query.addConditions(UNHEALTHY_CONTAINERS.ACTUAL_REPLICA_COUNT.eq(0)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java index 1fc114eabd7..82913f453d0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java @@ -23,9 +23,12 @@ import java.util.List; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper; /** * Interface for the OM Metadata Manager + DB store maintained by @@ -109,8 +112,17 @@ List listBucketsUnderVolume( /** * Return the OzoneConfiguration instance used by Recon. - * @return + * @return OzoneConfiguration */ OzoneConfiguration getOzoneConfiguration(); + /** + * A lighter weight version of the getKeyTable method that only returns the KeyEntityInfo wrapper object. This + * avoids creating a full OMKeyInfo object for each key if it is not needed. + * @param bucketLayout The Bucket layout to use for the key table. + * @return A table of keys and their metadata. 
+ * @throws IOException + */ + Table getKeyTableLite(BucketLayout bucketLayout) throws IOException; + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java index 91cb61369fc..f750a0abb6a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java @@ -41,9 +41,11 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.recon.ReconUtils; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -97,6 +99,7 @@ private void initializeNewRdbStore(File dbFile) throws IOException { .setName(dbFile.getName()) .setPath(dbFile.toPath().getParent()); addOMTablesAndCodecs(dbStoreBuilder); + dbStoreBuilder.addCodec(KeyEntityInfoProtoWrapper.class, KeyEntityInfoProtoWrapper.getCodec()); setStore(dbStoreBuilder.build()); LOG.info("Created OM DB handle from snapshot at {}.", dbFile.getAbsolutePath()); @@ -109,6 +112,12 @@ private void initializeNewRdbStore(File dbFile) throws IOException { } } + @Override + public Table getKeyTableLite(BucketLayout bucketLayout) throws IOException { + String tableName = bucketLayout.isFileSystemOptimized() ? FILE_TABLE : KEY_TABLE; + return getStore().getTable(tableName, String.class, KeyEntityInfoProtoWrapper.class); + } + @Override public void updateOmDB(File newDbLocation) throws IOException { if (getStore() != null) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java index 5895d3e133c..7afed9c1ce9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java @@ -34,11 +34,11 @@ * For Recon DB table definition. 
*/ public class ContainerReplicaHistoryList { - private static final Codec CODEC - = new DelegatedCodec<>(Proto2Codec.get( - ContainerReplicaHistoryListProto.getDefaultInstance()), + private static final Codec CODEC = new DelegatedCodec<>( + Proto2Codec.get(ContainerReplicaHistoryListProto.getDefaultInstance()), ContainerReplicaHistoryList::fromProto, - ContainerReplicaHistoryList::toProto); + ContainerReplicaHistoryList::toProto, + ContainerReplicaHistoryList.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index 65a9530c5ca..2ebeafcccb9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -205,17 +205,6 @@ public String getRevision(DatanodeDetails datanodeDetails) { EMPTY_DATANODE_DETAILS).getRevision(); } - /** - * Returns the build date of the given node. - * - * @param datanodeDetails DatanodeDetails - * @return buildDate - */ - public String getBuildDate(DatanodeDetails datanodeDetails) { - return inMemDatanodeDetails.getOrDefault(datanodeDetails.getUuid(), - EMPTY_DATANODE_DETAILS).getBuildDate(); - } - @Override public void onMessage(CommandForDatanode commandForDatanode, EventPublisher ignored) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java index 1ea2f7b1312..4970d5da915 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java @@ -32,27 +32,27 @@ /** * Recon SCM db file for ozone. 
*/ -public class ReconSCMDBDefinition extends SCMDBDefinition { +public final class ReconSCMDBDefinition extends SCMDBDefinition { private static final Codec UUID_CODEC = new DelegatedCodec<>( StringCodec.get(), UUID::fromString, UUID::toString, - DelegatedCodec.CopyType.SHALLOW); + UUID.class, DelegatedCodec.CopyType.SHALLOW); public static final String RECON_SCM_DB_NAME = "recon-scm.db"; - public static final DBColumnFamilyDefinition - NODES = - new DBColumnFamilyDefinition( - "nodes", - UUID.class, - UUID_CODEC, - DatanodeDetails.class, - DatanodeDetails.getCodec()); + public static final DBColumnFamilyDefinition NODES + = new DBColumnFamilyDefinition<>("nodes", UUID_CODEC, DatanodeDetails.getCodec()); private static final Map> COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( - new SCMDBDefinition().getMap(), NODES); + SCMDBDefinition.get().getMap(), NODES); - public ReconSCMDBDefinition() { + private static final ReconSCMDBDefinition INSTANCE = new ReconSCMDBDefinition(); + + public static ReconSCMDBDefinition get() { + return INSTANCE; + } + + private ReconSCMDBDefinition() { super(COLUMN_FAMILIES); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index a7f486ea5ac..eff68848a2f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -131,6 +131,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.sql.DataSource; + /** * Recon's 'lite' version of SCM. */ @@ -155,6 +157,8 @@ public class ReconStorageContainerManagerFacade private final SCMNodeDetails reconNodeDetails; private final SCMHAManager scmhaManager; private final SequenceIdGenerator sequenceIdGen; + private final ContainerHealthTask containerHealthTask; + private final DataSource dataSource; private DBStore dbStore; private ReconNodeManager nodeManager; @@ -187,7 +191,8 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, ReconContainerMetadataManager reconContainerMetadataManager, ReconUtils reconUtils, ReconSafeModeManager safeModeManager, - ReconContext reconContext) throws IOException { + ReconContext reconContext, + DataSource dataSource) throws IOException { reconNodeDetails = reconUtils.getReconNodeDetails(conf); this.threadNamePrefix = reconNodeDetails.threadNamePrefix(); this.eventQueue = new EventQueue(threadNamePrefix); @@ -217,8 +222,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, this.scmStorageConfig = new ReconStorageConfig(conf, reconUtils); this.clusterMap = new NetworkTopologyImpl(conf); - this.dbStore = DBStoreBuilder - .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition()); + this.dbStore = DBStoreBuilder.createDBStore(ozoneConfiguration, ReconSCMDBDefinition.get()); this.scmLayoutVersionManager = new HDDSLayoutVersionManager(scmStorageConfig.getLayoutVersion()); @@ -272,7 +276,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, scmServiceProvider, reconTaskStatusDao, reconTaskConfig); - ContainerHealthTask containerHealthTask = new ContainerHealthTask( + containerHealthTask = new ContainerHealthTask( containerManager, scmServiceProvider, reconTaskStatusDao, containerHealthSchemaManager, containerPlacementPolicy, reconTaskConfig, 
reconContainerMetadataManager, conf); @@ -285,6 +289,8 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, containerCountBySizeDao, utilizationSchemaDefinition); + this.dataSource = dataSource; + StaleNodeHandler staleNodeHandler = new ReconStaleNodeHandler(nodeManager, pipelineManager, conf, pipelineSyncTask); @@ -626,8 +632,7 @@ private void deleteOldSCMDB() throws IOException { private void initializeNewRdbStore(File dbFile) throws IOException { try { - DBStore newStore = createDBAndAddSCMTablesAndCodecs( - dbFile, new ReconSCMDBDefinition()); + final DBStore newStore = createDBAndAddSCMTablesAndCodecs(dbFile, ReconSCMDBDefinition.get()); Table nodeTable = ReconSCMDBDefinition.NODES.getTable(dbStore); Table newNodeTable = @@ -741,6 +746,12 @@ public StorageContainerServiceProvider getScmServiceProvider() { public ContainerSizeCountTask getContainerSizeCountTask() { return containerSizeCountTask; } + + @VisibleForTesting + public ContainerHealthTask getContainerHealthTask() { + return containerHealthTask; + } + @VisibleForTesting public ContainerCountBySizeDao getContainerCountBySizeDao() { return containerCountBySizeDao; @@ -749,4 +760,8 @@ public ContainerCountBySizeDao getContainerCountBySizeDao() { public ReconContext getReconContext() { return reconContext; } + + public DataSource getDataSource() { + return dataSource; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java index 59957e11624..44595a43b79 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java @@ -70,7 +70,7 @@ void batchStoreContainerKeyMapping(BatchOperation batch, Integer count) throws IOException; /** - * Store the containerID -> no. of keys count into the container DB store. + * Store the containerID -> no. of keys count into the container DB store. * * @param containerID the containerID. * @param count count of the keys within the given containerID. @@ -80,7 +80,7 @@ void batchStoreContainerKeyMapping(BatchOperation batch, void storeContainerKeyCount(Long containerID, Long count) throws IOException; /** - * Store the containerID -> no. of keys count into a batch. + * Store the containerID -> no. of keys count into a batch. * * @param batch the batch operation we store into * @param containerID the containerID. @@ -91,7 +91,7 @@ void batchStoreContainerKeyCounts(BatchOperation batch, Long containerID, Long count) throws IOException; /** - * Store the containerID -> ContainerReplicaWithTimestamp mapping to the + * Store the containerID -> ContainerReplicaWithTimestamp mapping to the * container DB store. * * @param containerID the containerID. @@ -159,7 +159,7 @@ Map getContainerReplicaHistory( * Get the stored key prefixes for the given containerId. * * @param containerId the given containerId. - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. */ Map getKeyPrefixesForContainer( long containerId) throws IOException; @@ -170,19 +170,19 @@ Map getKeyPrefixesForContainer( * * @param containerId the given containerId. * @param prevKeyPrefix the key prefix to seek to and start scanning. - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. 
*/ Map getKeyPrefixesForContainer( long containerId, String prevKeyPrefix) throws IOException; /** * Get a Map of containerID, containerMetadata of Containers only for the - * given limit. If the limit is -1 or any integer <0, then return all + * given limit. If the limit is -1 or any integer < 0, then return all * the containers without any limit. * * @param limit the no. of containers to fetch. * @param prevContainer containerID after which the results are returned. - * @return Map of containerID -> containerMetadata. + * @return Map of containerID -> containerMetadata. * @throws IOException */ Map getContainers(int limit, long prevContainer) @@ -256,7 +256,7 @@ void commitBatchOperation(RDBBatchOperation rdbBatchOperation) * * @param prevKeyPrefix the key prefix to seek to and start scanning. * @param keyVersion the key version to seek - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. */ Map getContainerForKeyPrefixes( String prevKeyPrefix, long keyVersion) throws IOException; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java index 01a630a5235..500c01bfde2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java @@ -18,9 +18,6 @@ package org.apache.hadoop.ozone.recon.spi.impl; -import static org.apache.commons.compress.utils.CharsetNames.UTF_8; - -import java.io.IOException; import java.nio.ByteBuffer; import org.apache.commons.lang3.ArrayUtils; @@ -31,6 +28,8 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Codec to serialize/deserialize {@link ContainerKeyPrefix}. */ @@ -51,8 +50,12 @@ private ContainerKeyPrefixCodec() { } @Override - public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) - throws IOException { + public Class getTypeClass() { + return ContainerKeyPrefix.class; + } + + @Override + public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) { Preconditions.checkNotNull(containerKeyPrefix, "Null object can't be converted to byte array."); byte[] containerIdBytes = Longs.toByteArray(containerKeyPrefix @@ -76,9 +79,7 @@ public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) } @Override - public ContainerKeyPrefix fromPersistedFormat(byte[] rawData) - throws IOException { - + public ContainerKeyPrefix fromPersistedFormat(byte[] rawData) { // First 8 bytes is the containerId. 
long containerIdFromDB = ByteBuffer.wrap(ArrayUtils.subarray( rawData, 0, Long.BYTES)).getLong(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java index 7baca152b28..70b1d65837c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java @@ -24,10 +24,9 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer; -import java.io.IOException; import java.nio.ByteBuffer; -import static org.apache.commons.compress.utils.CharsetNames.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; /** * Codec to serialize/deserialize {@link KeyPrefixContainer}. @@ -49,8 +48,12 @@ private KeyPrefixContainerCodec() { private static final String KEY_DELIMITER = "_"; @Override - public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) - throws IOException { + public Class getTypeClass() { + return KeyPrefixContainer.class; + } + + @Override + public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) { Preconditions.checkNotNull(keyPrefixContainer, "Null object can't be converted to byte array."); byte[] keyPrefixBytes = keyPrefixContainer.getKeyPrefix().getBytes(UTF_8); @@ -75,9 +78,7 @@ public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) } @Override - public KeyPrefixContainer fromPersistedFormat(byte[] rawData) - throws IOException { - + public KeyPrefixContainer fromPersistedFormat(byte[] rawData) { // When reading from byte[], we can always expect to have the key, version // and version parts in the byte array. byte[] keyBytes = ArrayUtils.subarray(rawData, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java index 46b75e45fad..42908a775a4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java @@ -191,7 +191,7 @@ public void batchStoreContainerKeyMapping(BatchOperation batch, } /** - * Store the containerID -> no. of keys count into the container DB store. + * Store the containerID -> no. of keys count into the container DB store. * * @param containerID the containerID. * @param count count of the keys within the given containerID. @@ -204,7 +204,7 @@ public void storeContainerKeyCount(Long containerID, Long count) } /** - * Store the containerID -> no. of keys count into a batch. + * Store the containerID -> no. of keys count into a batch. * * @param batch the batch we store into * @param containerID the containerID. @@ -219,7 +219,7 @@ public void batchStoreContainerKeyCounts(BatchOperation batch, } /** - * Store the ContainerID -> ContainerReplicaHistory (container first and last + * Store the ContainerID -> ContainerReplicaHistory (container first and last * seen time) mapping to the container DB store. * * @param containerID the containerID. 
@@ -417,16 +417,16 @@ public Map getKeyPrefixesForContainer( } /** - * Iterate the DB to construct a Map of containerID -> containerMetadata + * Iterate the DB to construct a Map of containerID -> containerMetadata * only for the given limit from the given start key. The start containerID * is skipped from the result. * - * Return all the containers if limit < 0. + * Return all the containers if limit < 0. * * @param limit No of containers to get. * @param prevContainer containerID after which the * list of containers are scanned. - * @return Map of containerID -> containerMetadata. + * @return Map of containerID -> containerMetadata. * @throws IOException on failure. */ @Override diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java index 8cb3b4188ed..cde24d7570b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java @@ -46,44 +46,34 @@ public ReconDBDefinition(String dbName) { CONTAINER_KEY = new DBColumnFamilyDefinition<>( "containerKeyTable", - ContainerKeyPrefix.class, ContainerKeyPrefixCodec.get(), - Integer.class, IntegerCodec.get()); public static final DBColumnFamilyDefinition KEY_CONTAINER = new DBColumnFamilyDefinition<>( "keyContainerTable", - KeyPrefixContainer.class, KeyPrefixContainerCodec.get(), - Integer.class, IntegerCodec.get()); public static final DBColumnFamilyDefinition CONTAINER_KEY_COUNT = new DBColumnFamilyDefinition<>( "containerKeyCountTable", - Long.class, LongCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition REPLICA_HISTORY = new DBColumnFamilyDefinition( "replica_history", - Long.class, LongCodec.get(), - ContainerReplicaHistoryList.class, ContainerReplicaHistoryList.getCodec()); public static final DBColumnFamilyDefinition NAMESPACE_SUMMARY = new DBColumnFamilyDefinition( "namespaceSummaryTable", - Long.class, LongCodec.get(), - NSSummary.class, NSSummaryCodec.get()); // Container Replica History with bcsId tracking. @@ -91,9 +81,7 @@ public ReconDBDefinition(String dbName) { REPLICA_HISTORY_V2 = new DBColumnFamilyDefinition( "replica_history_v2", - Long.class, LongCodec.get(), - ContainerReplicaHistoryList.class, ContainerReplicaHistoryList.getCodec()); private static final Map> diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java index fd5d8864080..bf34c9f8930 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java @@ -57,7 +57,7 @@ /** * Class to iterate over the OM DB and populate the Recon container DB with - * the container -> Key reverse mapping. + * the container -> Key reverse mapping. */ public class ContainerKeyMapperTask implements ReconOmTask { @@ -81,8 +81,8 @@ public ContainerKeyMapperTask(ReconContainerMetadataManager } /** - * Read Key -> ContainerId data from OM snapshot DB and write reverse map - * (container, key) -> count to Recon Container DB. + * Read Key -> ContainerId data from OM snapshot DB and write reverse map + * (container, key) -> count to Recon Container DB. 
*/ @Override public Pair reprocess(OMMetadataManager omMetadataManager) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java index 41e6bf962a7..d1f98c49bdc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java @@ -49,14 +49,12 @@ public class OMDBUpdatesHandler extends ManagedWriteBatch.Handler { private OMMetadataManager omMetadataManager; private List omdbUpdateEvents = new ArrayList<>(); private Map> omdbLatestUpdateEvents = new HashMap<>(); - private OMDBDefinition omdbDefinition; - private OmUpdateEventValidator omUpdateEventValidator; + private final OMDBDefinition omdbDefinition = OMDBDefinition.get(); + private final OmUpdateEventValidator omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition); public OMDBUpdatesHandler(OMMetadataManager metadataManager) { omMetadataManager = metadataManager; tablesNames = metadataManager.getStore().getTableNames(); - omdbDefinition = new OMDBDefinition(); - omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition); } @Override diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java index 3c7ce844e9c..b5a690f5eb4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java @@ -23,7 +23,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; /** * OmUpdateEventValidator is a utility class for validating OMDBUpdateEvents @@ -48,7 +47,6 @@ public OmUpdateEventValidator(OMDBDefinition omdbDefinition) { * @param keyType the key type of the event. * @param action the action performed on the event. * @return true if the event is valid, false otherwise. - * @throws IOException if an I/O error occurs during the validation. */ public boolean isValidEvent(String tableName, Object actualValueType, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java index e904334bb31..2092d6a326c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java @@ -35,14 +35,14 @@ public interface ReconOmTask { /** * Process a set of OM events on tables that the task is listening on. * @param events Set of events to be processed by the task. - * @return Pair of task name -> task success. + * @return Pair of task name -> task success. */ Pair process(OMUpdateEventBatch events); /** * Process a on tables that the task is listening on. * @param omMetadataManager OM Metadata manager instance. - * @return Pair of task name -> task success. + * @return Pair of task name -> task success. 
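+ *
+ * <p>Illustrative implementation sketch (the task name below is hypothetical; the
+ * Pair type is the commons-lang3 Pair already used by existing Recon tasks, and
+ * exception handling is omitted):
+ * <pre>
+ *   public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
+ *     // Rebuild this task's derived data from the OM snapshot tables, then
+ *     // report the task name and whether the rebuild succeeded.
+ *     return Pair.of("ExampleTask", true);
+ *   }
+ * </pre>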
*/ Pair reprocess(OMMetadataManager omMetadataManager); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java index d66a7279cce..1a514ceb90b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java @@ -53,7 +53,7 @@ void reInitializeTasks(ReconOMMetadataManager omMetadataManager) /** * Get set of registered tasks. - * @return Map of Task name -> Task. + * @return Map of Task name -> Task. */ Map getRegisteredTasks(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java new file mode 100644 index 00000000000..e75efd2116a --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; +import org.jooq.DSLContext; +import org.jooq.impl.DSL; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; + +import static org.apache.hadoop.ozone.recon.upgrade.ReconLayoutFeature.INITIAL_VERSION; +import static org.apache.hadoop.ozone.recon.upgrade.ReconUpgradeAction.UpgradeActionType.FINALIZE; +import static org.hadoop.ozone.recon.codegen.SqlDbUtils.TABLE_EXISTS_CHECK; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME; +import static org.jooq.impl.DSL.field; +import static org.jooq.impl.DSL.name; + +/** + * Upgrade action for the INITIAL schema version, which manages constraints + * for the UNHEALTHY_CONTAINERS table. 
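+ *
+ * <p>Illustrative sketch of how this action is discovered and run (the scmFacade
+ * instance comes from Recon's startup wiring and exception handling is omitted;
+ * only the ReconLayoutFeature and ReconUpgradeAction calls shown here are defined
+ * in this patch):
+ * <pre>
+ *   ReconLayoutFeature.registerUpgradeActions();
+ *   Optional<ReconUpgradeAction> action = ReconLayoutFeature.INITIAL_VERSION
+ *       .getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE);
+ *   if (action.isPresent()) {
+ *     action.get().execute(scmFacade);
+ *   }
+ * </pre>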
+ */ +@UpgradeActionRecon(feature = INITIAL_VERSION, type = FINALIZE) +public class InitialConstraintUpgradeAction implements ReconUpgradeAction { + + private static final Logger LOG = LoggerFactory.getLogger(InitialConstraintUpgradeAction.class); + private DataSource dataSource; + private DSLContext dslContext; + + @Override + public void execute(ReconStorageContainerManagerFacade scmFacade) throws SQLException { + this.dataSource = scmFacade.getDataSource(); + try (Connection conn = dataSource.getConnection()) { + if (!TABLE_EXISTS_CHECK.test(conn, UNHEALTHY_CONTAINERS_TABLE_NAME)) { + return; + } + dslContext = DSL.using(conn); + // Drop the existing constraint + dropConstraint(); + // Add the updated constraint with all enum states + addUpdatedConstraint(); + } catch (SQLException e) { + throw new SQLException("Failed to execute InitialConstraintUpgradeAction", e); + } + } + + /** + * Drops the existing constraint from the UNHEALTHY_CONTAINERS table. + */ + private void dropConstraint() { + String constraintName = UNHEALTHY_CONTAINERS_TABLE_NAME + "ck1"; + dslContext.alterTable(UNHEALTHY_CONTAINERS_TABLE_NAME) + .dropConstraint(constraintName) + .execute(); + LOG.debug("Dropped the existing constraint: {}", constraintName); + } + + /** + * Adds the updated constraint directly within this class. + */ + private void addUpdatedConstraint() { + String[] enumStates = Arrays + .stream(ContainerSchemaDefinition.UnHealthyContainerStates.values()) + .map(Enum::name) + .toArray(String[]::new); + + dslContext.alterTable(ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME) + .add(DSL.constraint(ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME + "ck1") + .check(field(name("container_state")) + .in(enumStates))) + .execute(); + + LOG.info("Added the updated constraint to the UNHEALTHY_CONTAINERS table for enum state values: {}", + Arrays.toString(enumStates)); + } + + @Override + public UpgradeActionType getType() { + return FINALIZE; + } + + @VisibleForTesting + public void setDataSource(DataSource dataSource) { + this.dataSource = dataSource; + } + + @VisibleForTesting + public void setDslContext(DSLContext dslContext) { + this.dslContext = dslContext; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java new file mode 100644 index 00000000000..52739efe1a6 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.reflections.Reflections; + +import java.util.Arrays; +import java.util.EnumMap; +import java.util.Optional; +import java.util.Set; + +/** + * Enum representing Recon layout features with their version, description, + * and associated upgrade action to be executed during an upgrade. + */ +public enum ReconLayoutFeature { + // Represents the starting point for Recon's layout versioning system. + INITIAL_VERSION(0, "Recon Layout Versioning Introduction"); + + private final int version; + private final String description; + private final EnumMap actions = + new EnumMap<>(ReconUpgradeAction.UpgradeActionType.class); + + ReconLayoutFeature(final int version, String description) { + this.version = version; + this.description = description; + } + + public int getVersion() { + return version; + } + + public String getDescription() { + return description; + } + + /** + * Retrieves the upgrade action for the specified {@link ReconUpgradeAction.UpgradeActionType}. + * + * @param type The type of the upgrade action (e.g., FINALIZE). + * @return An {@link Optional} containing the upgrade action if present. + */ + public Optional getAction(ReconUpgradeAction.UpgradeActionType type) { + return Optional.ofNullable(actions.get(type)); + } + + /** + * Associates a given upgrade action with a specific upgrade phase for this feature. + * + * @param type The phase/type of the upgrade action. + * @param action The upgrade action to associate with this feature. + */ + public void addAction(ReconUpgradeAction.UpgradeActionType type, ReconUpgradeAction action) { + actions.put(type, action); + } + + /** + * Scans the classpath for all classes annotated with {@link UpgradeActionRecon} + * and registers their upgrade actions for the corresponding feature and phase. + * This method dynamically loads and registers all upgrade actions based on their + * annotations. + */ + public static void registerUpgradeActions() { + Reflections reflections = new Reflections("org.apache.hadoop.ozone.recon.upgrade"); + Set> actionClasses = reflections.getTypesAnnotatedWith(UpgradeActionRecon.class); + + for (Class actionClass : actionClasses) { + try { + ReconUpgradeAction action = (ReconUpgradeAction) actionClass.getDeclaredConstructor().newInstance(); + UpgradeActionRecon annotation = actionClass.getAnnotation(UpgradeActionRecon.class); + annotation.feature().addAction(annotation.type(), action); + } catch (Exception e) { + throw new RuntimeException("Failed to register upgrade action: " + actionClass.getSimpleName(), e); + } + } + } + + /** + * Determines the Software Layout Version (SLV) based on the latest feature version. + * @return The Software Layout Version (SLV). + */ + public static int determineSLV() { + return Arrays.stream(ReconLayoutFeature.values()) + .mapToInt(ReconLayoutFeature::getVersion) + .max() + .orElse(0); // Default to 0 if no features are defined + } + + /** + * Returns the list of all layout feature values. + * + * @return An array of all {@link ReconLayoutFeature} values. 
+ */ + public static ReconLayoutFeature[] getValues() { + return ReconLayoutFeature.values(); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutVersionManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutVersionManager.java new file mode 100644 index 00000000000..050127fb751 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutVersionManager.java @@ -0,0 +1,161 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.ReconContext; +import org.apache.hadoop.ozone.recon.ReconSchemaVersionTableManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +/** + * ReconLayoutVersionManager is responsible for managing the layout version of the Recon service. + * It determines the current Metadata Layout Version (MLV) and Software Layout Version (SLV) of the + * Recon service, and finalizes the layout features that need to be upgraded. + */ +public class ReconLayoutVersionManager { + + private static final Logger LOG = LoggerFactory.getLogger(ReconLayoutVersionManager.class); + + private final ReconSchemaVersionTableManager schemaVersionTableManager; + private final ReconContext reconContext; + + // Metadata Layout Version (MLV) of the Recon Metadata on disk + private int currentMLV; + + public ReconLayoutVersionManager(ReconSchemaVersionTableManager schemaVersionTableManager, + ReconContext reconContext) + throws SQLException { + this.schemaVersionTableManager = schemaVersionTableManager; + this.currentMLV = determineMLV(); + this.reconContext = reconContext; + ReconLayoutFeature.registerUpgradeActions(); // Register actions via annotation + } + + /** + * Determines the current Metadata Layout Version (MLV) from the version table. + * @return The current Metadata Layout Version (MLV). + */ + private int determineMLV() throws SQLException { + return schemaVersionTableManager.getCurrentSchemaVersion(); + } + + /** + * Determines the Software Layout Version (SLV) based on the latest feature version. + * @return The Software Layout Version (SLV). 
+ */ + private int determineSLV() { + return Arrays.stream(ReconLayoutFeature.values()) + .mapToInt(ReconLayoutFeature::getVersion) + .max() + .orElse(0); + } + + /** + * Finalizes the layout features that need to be upgraded, by executing the upgrade action for each + * feature that is registered for finalization. + */ + public void finalizeLayoutFeatures(ReconStorageContainerManagerFacade scmFacade) { + // Get features that need finalization, sorted by version + List featuresToFinalize = getRegisteredFeatures(); + + try (Connection connection = scmFacade.getDataSource().getConnection()) { + connection.setAutoCommit(false); // Turn off auto-commit for transactional control + + for (ReconLayoutFeature feature : featuresToFinalize) { + try { + // Fetch only the FINALIZE action for the feature + Optional action = feature.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE); + if (action.isPresent()) { + // Update the schema version in the database + updateSchemaVersion(feature.getVersion(), connection); + + // Execute the upgrade action + action.get().execute(scmFacade); + + // Commit the transaction only if both operations succeed + connection.commit(); + LOG.info("Feature versioned {} finalized successfully.", feature.getVersion()); + } + } catch (Exception e) { + // Rollback any pending changes for the current feature due to failure + connection.rollback(); + currentMLV = determineMLV(); // Rollback the MLV to the original value + LOG.error("Failed to finalize feature {}. Rolling back changes.", feature.getVersion(), e); + throw e; + } + } + } catch (Exception e) { + // Log the error to both logs and ReconContext + LOG.error("Failed to finalize layout features: {}", e.getMessage()); + reconContext.updateErrors(ReconContext.ErrorCode.UPGRADE_FAILURE); + reconContext.updateHealthStatus(new AtomicBoolean(false)); + throw new RuntimeException("Recon failed to finalize layout features. Startup halted.", e); + } + } + + + /** + * Returns a list of ReconLayoutFeature objects that are registered for finalization. + */ + protected List getRegisteredFeatures() { + List allFeatures = + Arrays.asList(ReconLayoutFeature.values()); + + LOG.info("Current MLV: {}. SLV: {}. Checking features for registration...", currentMLV, determineSLV()); + + List registeredFeatures = allFeatures.stream() + .filter(feature -> feature.getVersion() > currentMLV) + .sorted((a, b) -> Integer.compare(a.getVersion(), b.getVersion())) // Sort by version in ascending order + .collect(Collectors.toList()); + + return registeredFeatures; + } + + /** + * Updates the Metadata Layout Version (MLV) in the database after finalizing a feature. + * This method uses the provided connection to ensure transactional consistency. + * + * @param newVersion The new Metadata Layout Version (MLV) to set. + * @param connection The database connection to use for the update operation. 
+ */ + private void updateSchemaVersion(int newVersion, Connection connection) { + schemaVersionTableManager.updateSchemaVersion(newVersion, connection); + this.currentMLV = newVersion; + LOG.info("MLV updated to: " + newVersion); + } + + public int getCurrentMLV() { + return currentMLV; + } + + public int getCurrentSLV() { + return determineSLV(); + } + +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconUpgradeAction.java new file mode 100644 index 00000000000..d5fdbdacb7c --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconUpgradeAction.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; + +/** + * ReconUpgradeAction is an interface for executing upgrade actions in Recon. + */ +public interface ReconUpgradeAction { + + /** + * Defines the different phases during which upgrade actions can be executed. + * Each action type corresponds to a specific point in the upgrade process: + * + * - FINALIZE: This action is executed automatically during the startup + * of Recon when it finalizes the layout upgrade. It ensures that all necessary + * upgrades or schema changes are applied to bring the system in sync with + * the latest version. + */ + enum UpgradeActionType { + FINALIZE + } + + /** + * Execute the upgrade action. + */ + void execute(ReconStorageContainerManagerFacade scmFacade) throws Exception; + + /** + * Provides the type of upgrade phase (e.g., FINALIZE). + */ + UpgradeActionType getType(); +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UpgradeActionRecon.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UpgradeActionRecon.java new file mode 100644 index 00000000000..11a6c16e198 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UpgradeActionRecon.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * The {@code UpgradeActionRecon} annotation is used to specify + * upgrade actions that should be executed during particular phases + * of the Recon service layout upgrade process. + * + *

    This annotation can be used to associate an upgrade action + * class with a specific layout feature and upgrade phase. The + * framework will dynamically discover these annotated upgrade + * actions and execute them based on the feature's version and + * the defined action type (e.g., {@link ReconUpgradeAction.UpgradeActionType#FINALIZE}). + * + *

    The annotation is retained at runtime, allowing the reflection-based + * mechanism to scan for annotated classes, register the associated actions, + * and execute them as necessary during the layout upgrade process. + * + * Example usage: + * + *

    + * @UpgradeActionRecon(feature = FEATURE_NAME, type = FINALIZE)
    + * public class FeatureNameUpgradeAction implements ReconUpgradeAction {
    + *   @Override
+ *   public void execute(ReconStorageContainerManagerFacade scmFacade) throws Exception {
+ *     // Custom upgrade logic for FEATURE_NAME
    + *   }
    + * }
+ * </pre>
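+ *
+ * A minimal sketch of how an annotated action could be discovered and run via
+ * reflection; {@code scmFacade} here is a placeholder for the
+ * {@link org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade}
+ * instance supplied by Recon:
+ *
+ * <pre>
+ * // Read the annotation back at runtime and trigger the FINALIZE action.
+ * UpgradeActionRecon meta =
+ *     FeatureNameUpgradeAction.class.getAnnotation(UpgradeActionRecon.class);
+ * if (meta.type() == ReconUpgradeAction.UpgradeActionType.FINALIZE) {
+ *   new FeatureNameUpgradeAction().execute(scmFacade);
+ * }
+ * </pre>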
    + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface UpgradeActionRecon { + + /** + * Defines the layout feature this upgrade action is associated with. + */ + ReconLayoutFeature feature(); + + /** + * Defines the type of upgrade phase during which the action should be executed. + */ + ReconUpgradeAction.UpgradeActionType type(); +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/package-info.java new file mode 100644 index 00000000000..56a94b1f84a --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/package-info.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * This package contains classes and interfaces for handling + * upgrade actions in Apache Ozone Recon. + * + * The main interface {@link org.apache.hadoop.ozone.recon.upgrade.ReconUpgradeAction} + * defines the structure for actions that need to be executed during an upgrade + * process in Recon. The actions can be triggered automatically + * during startup to ensure the correct version of the schema or + * layout is applied. 
+ */ +package org.apache.hadoop.ozone.recon.upgrade; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 8cfb23ad685..784ee8302e3 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -1480,7 +1480,7 @@ "path": "/dummyVolume/dummyBucket", "size": 200000, "sizeWithReplica": -1, - "subPathCount": 5, + "subPathCount": 8, "subPaths": [ { "path": "/dummyVolume/dummyBucket/dir1", @@ -1923,7 +1923,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 1, "reason": null, - "keys": 1, + "keys": 4, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -1997,7 +1997,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 3, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2071,7 +2071,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 2, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2108,7 +2108,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 5, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2145,7 +2145,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 3, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2182,7 +2182,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 6, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a2", "replicas": [ { @@ -2219,7 +2219,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 2, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a3", "replicas": [ { @@ -5169,438 +5169,317 @@ ] }, "nonFSO": { - "keysSummary": { - "totalUnreplicatedDataSize": 10485760, - "totalReplicatedDataSize": 31457280, - "totalOpenKeys": 10 - }, + "lastKey": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/19/113328137261088807", + "replicatedDataSize": 31457280, + "unreplicatedDataSize": 10485760, "nonFSO": [ { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/01/110569623850191713", - "path": "nonfso 1", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2439/110569623850191714", - "path": "nonfso 2", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191715", - "path": "nonfso 11", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2441/110569623850191716", - "path": "nonfso 12", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": 
"/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191717", - "path": "nonfso 21", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2441/110569623850191718", - "path": "nonfso 22", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - } - ], - "status": "OK" - }, - "fso": { - "fso": [ - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2401/110569623850191713", - "path": "1", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2402/110569623850191714", - "path": "2", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2403/110569623850191715", - "path": "3", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2404/110569623850191716", - "path": "4", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2405/110569623850191717", - "path": "5", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2406/110569623850191718", - "path": "6", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2407/110569623850191719", - "path": "7", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2408/110569623850191720", - "path": "8", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2409/110569623850191721", - "path": "9", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/10/113328137245098014", + "path": "dir1/dir2/dir3/an9uf2eeox/10", + 
"inStateSince": 1729250141069, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 10", - "path": "10", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161542, + "modificationTime": 1729250161542, + "isKey": true }, { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191722", - "path": "11", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/11/113328137245818911", + "path": "dir1/dir2/dir3/an9uf2eeox/11", + "inStateSince": 1729250141080, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 12", - "path": "12", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/12/113328137247195168", + "path": "dir1/dir2/dir3/an9uf2eeox/12", + "inStateSince": 1729250141091, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 13", - "path": "13", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/13/113328137248178209", + "path": "dir1/dir2/dir3/an9uf2eeox/13", + "inStateSince": 1729250141116, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 14", - "path": "14", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/14/113328137249685538", + "path": "dir1/dir2/dir3/an9uf2eeox/14", + "inStateSince": 1729250141139, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 15", - "path": "15", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/15/113328137250930723", + "path": "dir1/dir2/dir3/an9uf2eeox/15", + "inStateSince": 1729250141158, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 16 key", - "path": "16", - "inStateSince": 
1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/16/113328137252569124", + "path": "dir1/dir2/dir3/an9uf2eeox/16", + "inStateSince": 1729250141183, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 17 key", - "path": "17", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/17/113328137259778085", + "path": "dir1/dir2/dir3/an9uf2eeox/17", + "inStateSince": 1729250141293, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 18 key", - "path": "18", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/18/113328137261023270", + "path": "dir1/dir2/dir3/an9uf2eeox/18", + "inStateSince": 1729250141312, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 19 key", - "path": "19", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/19/113328137261088807", + "path": "dir1/dir2/dir3/an9uf2eeox/19", + "inStateSince": 1729250141313, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191723", - "path": "21", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true + } + ], + "status": "OK" + }, + "fso": { + "lastKey": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/9/113328113600626690", + "replicatedDataSize": 31457280, + "unreplicatedDataSize": 10485760, + "fso": [{ + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/0/113328113600561153", + "path": "dir1/dir2/dir3/pnrnqh5gux/0", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 22", - "path": "22", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820562, + "modificationTime": 1729249820562, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/1/113328113600626694", + "path": "dir1/dir2/dir3/pnrnqh5gux/1", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - 
"replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 23", - "path": "23", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820562, + "modificationTime": 1729249820562, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/2/113328113600626691", + "path": "dir1/dir2/dir3/pnrnqh5gux/2", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 24", - "path": "24", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820563, + "modificationTime": 1729249820563, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/3/113328113600692233", + "path": "dir1/dir2/dir3/pnrnqh5gux/3", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 25", - "path": "25", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820563, + "modificationTime": 1729249820563, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/4/113328113600626695", + "path": "dir1/dir2/dir3/pnrnqh5gux/4", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 26 key", - "path": "26", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820563, + "modificationTime": 1729249820563, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/5/113328113600561152", + "path": "dir1/dir2/dir3/pnrnqh5gux/5", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 17 key", - "path": "27", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820564, + "modificationTime": 1729249820564, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/6/113328113600626692", + "path": "dir1/dir2/dir3/pnrnqh5gux/6", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 18 key", - "path": "28", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820564, + "modificationTime": 
1729249820564, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/7/113328113600626696", + "path": "dir1/dir2/dir3/pnrnqh5gux/7", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 19 key", - "path": "29", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820564, + "modificationTime": 1729249820564, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/8/113328113600626693", + "path": "dir1/dir2/dir3/pnrnqh5gux/8", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 20 key", - "path": "20", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820564, + "modificationTime": 1729249820564, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/9/113328113600626690", + "path": "dir1/dir2/dir3/pnrnqh5gux/9", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - } - ], + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820565, + "modificationTime": 1729249820565, + "isKey": true + }], "status": "OK" }, "keydeletePending": { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index d931a0ed79b..c2c046f1120 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -60,11 +60,11 @@ "@testing-library/react": "^12.1.5", "@types/react": "16.8.15", "@types/react-dom": "16.8.4", - "@types/react-router-dom": "^4.3.5", + "@types/react-router-dom": "^5.3.3", "@types/react-select": "^3.0.13", "@typescript-eslint/eslint-plugin": "^5.30.0", "@typescript-eslint/parser": "^5.30.0", - "@vitejs/plugin-react": "^4.0.0", + "@vitejs/plugin-react-swc": "^3.5.0", "eslint": "^7.28.0", "eslint-config-prettier": "^8.10.0", "eslint-plugin-prettier": "^3.4.1", @@ -73,8 +73,7 @@ "msw": "1.3.3", "npm-run-all": "^4.1.5", "prettier": "^2.8.4", - "vite": "4.5.3", - "vite-plugin-svgr": "^4.2.0", + "vite": "4.5.5", "vite-tsconfig-paths": "^3.6.0", "vitest": "^1.6.0" }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index d1b8844ac62..dfdbc7cedce 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -28,7 +28,7 @@ dependencies: version: 2.5.1 echarts: specifier: ^5.5.0 - version: 5.5.0 + version: 5.5.1 filesize: specifier: ^6.4.0 version: 6.4.0 @@ -63,7 +63,7 @@ dependencies: devDependencies: 
'@testing-library/jest-dom': specifier: ^6.4.8 - version: 6.4.8 + version: 6.5.0 '@testing-library/react': specifier: ^12.1.5 version: 12.1.5(react-dom@16.14.0)(react@16.14.0) @@ -74,8 +74,8 @@ devDependencies: specifier: 16.8.4 version: 16.8.4 '@types/react-router-dom': - specifier: ^4.3.5 - version: 4.3.5 + specifier: ^5.3.3 + version: 5.3.3 '@types/react-select': specifier: ^3.0.13 version: 3.1.2 @@ -85,9 +85,9 @@ devDependencies: '@typescript-eslint/parser': specifier: ^5.30.0 version: 5.62.0(eslint@7.32.0)(typescript@4.9.5) - '@vitejs/plugin-react': - specifier: ^4.0.0 - version: 4.3.1(vite@4.5.3) + '@vitejs/plugin-react-swc': + specifier: ^3.5.0 + version: 3.7.0(vite@4.5.5) eslint: specifier: ^7.28.0 version: 7.32.0 @@ -99,7 +99,7 @@ devDependencies: version: 3.4.1(eslint-config-prettier@8.10.0)(eslint@7.32.0)(prettier@2.8.8) jsdom: specifier: ^24.1.1 - version: 24.1.1 + version: 24.1.3 json-server: specifier: ^0.15.1 version: 0.15.1 @@ -113,17 +113,14 @@ devDependencies: specifier: ^2.8.4 version: 2.8.8 vite: - specifier: 4.5.3 - version: 4.5.3(less@3.13.1) - vite-plugin-svgr: - specifier: ^4.2.0 - version: 4.2.0(typescript@4.9.5)(vite@4.5.3) + specifier: 4.5.5 + version: 4.5.5(less@3.13.1) vite-tsconfig-paths: specifier: ^3.6.0 - version: 3.6.0(vite@4.5.3) + version: 3.6.0(vite@4.5.5) vitest: specifier: ^1.6.0 - version: 1.6.0(jsdom@24.1.1)(less@3.13.1) + version: 1.6.0(jsdom@24.1.3)(less@3.13.1) packages: @@ -131,14 +128,6 @@ packages: resolution: {integrity: sha512-Ff9+ksdQQB3rMncgqDK78uLznstjyfIf2Arnh22pW8kBpLs6rpKDwgnZT46hin5Hl1WzazzK64DOrhSwYpS7bQ==} dev: true - /@ampproject/remapping@2.3.0: - resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} - engines: {node: '>=6.0.0'} - dependencies: - '@jridgewell/gen-mapping': 0.3.5 - '@jridgewell/trace-mapping': 0.3.25 - dev: true - /@ant-design/colors@5.1.1: resolution: {integrity: sha512-Txy4KpHrp3q4XZdfgOBqLl+lkQIc3tEvHXOimRN1giX1AEC7mGtyrO9p8iRGJ3FLuVMGa2gNEzQyghVymLttKQ==} dependencies: @@ -164,7 +153,7 @@ packages: dependencies: '@ant-design/colors': 6.0.0 '@ant-design/icons-svg': 4.4.2 - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 lodash: 4.17.21 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -177,7 +166,7 @@ packages: peerDependencies: react: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 json2mq: 0.2.0 lodash: 4.17.21 @@ -198,140 +187,35 @@ packages: '@babel/highlight': 7.24.7 picocolors: 1.0.1 - /@babel/compat-data@7.24.7: - resolution: {integrity: sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==} - engines: {node: '>=6.9.0'} - dev: true - - /@babel/core@7.24.7: - resolution: {integrity: sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==} + /@babel/generator@7.25.6: + resolution: {integrity: sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==} engines: {node: '>=6.9.0'} dependencies: - '@ampproject/remapping': 2.3.0 - '@babel/code-frame': 7.24.7 - '@babel/generator': 7.24.7 - '@babel/helper-compilation-targets': 7.24.7 - '@babel/helper-module-transforms': 7.24.7(@babel/core@7.24.7) - '@babel/helpers': 7.24.7 - '@babel/parser': 7.24.7 - '@babel/template': 7.24.7 - '@babel/traverse': 7.24.7 - '@babel/types': 7.24.7 - convert-source-map: 2.0.0 - debug: 4.3.5 - gensync: 1.0.0-beta.2 - json5: 2.2.3 - semver: 6.3.1 - transitivePeerDependencies: - - 
supports-color - dev: true - - /@babel/generator@7.24.7: - resolution: {integrity: sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/types': 7.24.7 + '@babel/types': 7.25.6 '@jridgewell/gen-mapping': 0.3.5 '@jridgewell/trace-mapping': 0.3.25 jsesc: 2.5.2 - - /@babel/helper-compilation-targets@7.24.7: - resolution: {integrity: sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/compat-data': 7.24.7 - '@babel/helper-validator-option': 7.24.7 - browserslist: 4.23.1 - lru-cache: 5.1.1 - semver: 6.3.1 - dev: true - - /@babel/helper-environment-visitor@7.24.7: - resolution: {integrity: sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/types': 7.24.7 - - /@babel/helper-function-name@7.24.7: - resolution: {integrity: sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/template': 7.24.7 - '@babel/types': 7.24.7 - - /@babel/helper-hoist-variables@7.24.7: - resolution: {integrity: sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/types': 7.24.7 + dev: false /@babel/helper-module-imports@7.24.7: resolution: {integrity: sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==} engines: {node: '>=6.9.0'} dependencies: - '@babel/traverse': 7.24.7 - '@babel/types': 7.24.7 + '@babel/traverse': 7.25.6 + '@babel/types': 7.25.6 transitivePeerDependencies: - supports-color + dev: false - /@babel/helper-module-transforms@7.24.7(@babel/core@7.24.7): - resolution: {integrity: sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0 - dependencies: - '@babel/core': 7.24.7 - '@babel/helper-environment-visitor': 7.24.7 - '@babel/helper-module-imports': 7.24.7 - '@babel/helper-simple-access': 7.24.7 - '@babel/helper-split-export-declaration': 7.24.7 - '@babel/helper-validator-identifier': 7.24.7 - transitivePeerDependencies: - - supports-color - dev: true - - /@babel/helper-plugin-utils@7.24.7: - resolution: {integrity: sha512-Rq76wjt7yz9AAc1KnlRKNAi/dMSVWgDRx43FHoJEbcYU6xOWaE2dVPwcdTukJrjxS65GITyfbvEYHvkirZ6uEg==} - engines: {node: '>=6.9.0'} - dev: true - - /@babel/helper-simple-access@7.24.7: - resolution: {integrity: sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/traverse': 7.24.7 - '@babel/types': 7.24.7 - transitivePeerDependencies: - - supports-color - dev: true - - /@babel/helper-split-export-declaration@7.24.7: - resolution: {integrity: sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/types': 7.24.7 - - /@babel/helper-string-parser@7.24.7: - resolution: {integrity: sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==} + /@babel/helper-string-parser@7.24.8: + resolution: {integrity: sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==} engines: {node: '>=6.9.0'} + dev: false 
/@babel/helper-validator-identifier@7.24.7: resolution: {integrity: sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==} engines: {node: '>=6.9.0'} - /@babel/helper-validator-option@7.24.7: - resolution: {integrity: sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==} - engines: {node: '>=6.9.0'} - dev: true - - /@babel/helpers@7.24.7: - resolution: {integrity: sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/template': 7.24.7 - '@babel/types': 7.24.7 - dev: true - /@babel/highlight@7.24.7: resolution: {integrity: sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==} engines: {node: '>=6.9.0'} @@ -341,71 +225,52 @@ packages: js-tokens: 4.0.0 picocolors: 1.0.1 - /@babel/parser@7.24.7: - resolution: {integrity: sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==} + /@babel/parser@7.25.6: + resolution: {integrity: sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==} engines: {node: '>=6.0.0'} hasBin: true dependencies: - '@babel/types': 7.24.7 - - /@babel/plugin-transform-react-jsx-self@7.24.7(@babel/core@7.24.7): - resolution: {integrity: sha512-fOPQYbGSgH0HUp4UJO4sMBFjY6DuWq+2i8rixyUMb3CdGixs/gccURvYOAhajBdKDoGajFr3mUq5rH3phtkGzw==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 - '@babel/helper-plugin-utils': 7.24.7 - dev: true - - /@babel/plugin-transform-react-jsx-source@7.24.7(@babel/core@7.24.7): - resolution: {integrity: sha512-J2z+MWzZHVOemyLweMqngXrgGC42jQ//R0KdxqkIz/OrbVIIlhFI3WigZ5fO+nwFvBlncr4MGapd8vTyc7RPNQ==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 - '@babel/helper-plugin-utils': 7.24.7 - dev: true + '@babel/types': 7.25.6 + dev: false - /@babel/runtime@7.24.7: - resolution: {integrity: sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==} + /@babel/runtime@7.25.6: + resolution: {integrity: sha512-VBj9MYyDb9tuLq7yzqjgzt6Q+IBQLrGZfdjOekyEirZPHxXWoTSGUTMrpsfi58Up73d13NfYLv8HT9vmznjzhQ==} engines: {node: '>=6.9.0'} dependencies: regenerator-runtime: 0.14.1 - /@babel/template@7.24.7: - resolution: {integrity: sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==} + /@babel/template@7.25.0: + resolution: {integrity: sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/parser': 7.24.7 - '@babel/types': 7.24.7 + '@babel/parser': 7.25.6 + '@babel/types': 7.25.6 + dev: false - /@babel/traverse@7.24.7: - resolution: {integrity: sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==} + /@babel/traverse@7.25.6: + resolution: {integrity: sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/generator': 7.24.7 - '@babel/helper-environment-visitor': 7.24.7 - '@babel/helper-function-name': 7.24.7 - '@babel/helper-hoist-variables': 7.24.7 - '@babel/helper-split-export-declaration': 7.24.7 - '@babel/parser': 7.24.7 - '@babel/types': 7.24.7 - debug: 4.3.5 + '@babel/generator': 
7.25.6 + '@babel/parser': 7.25.6 + '@babel/template': 7.25.0 + '@babel/types': 7.25.6 + debug: 4.3.6 globals: 11.12.0 transitivePeerDependencies: - supports-color + dev: false - /@babel/types@7.24.7: - resolution: {integrity: sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==} + /@babel/types@7.25.6: + resolution: {integrity: sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==} engines: {node: '>=6.9.0'} dependencies: - '@babel/helper-string-parser': 7.24.7 + '@babel/helper-string-parser': 7.24.8 '@babel/helper-validator-identifier': 7.24.7 to-fast-properties: 2.0.0 + dev: false /@ctrl/tinycolor@3.6.1: resolution: {integrity: sha512-SITSV6aIXsuVNV3f3O0f2n/cgyEDWoSqtZMYiAmcsYHydcKrOz3gUxB/iXd/Qf08+IZX4KpgNbvUdMBmWz+kcA==} @@ -430,7 +295,7 @@ packages: peerDependencies: react: '>=16.3.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 '@emotion/cache': 10.0.29 '@emotion/css': 10.0.27 '@emotion/serialize': 0.11.16 @@ -904,8 +769,8 @@ packages: eslint-visitor-keys: 3.4.3 dev: true - /@eslint-community/regexpp@4.10.1: - resolution: {integrity: sha512-Zm2NGpWELsQAD1xsJzGQpYfvICSsFkEpU0jxBjfdC6uNEWXcHnfs9hScFWtXVDVl+rBQJGrl4g1vcKIejpH9dA==} + /@eslint-community/regexpp@4.11.0: + resolution: {integrity: sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==} engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} dev: true @@ -914,7 +779,7 @@ packages: engines: {node: ^10.12.0 || >=12.0.0} dependencies: ajv: 6.12.6 - debug: 4.3.5 + debug: 4.3.6 espree: 7.3.1 globals: 13.24.0 ignore: 4.0.6 @@ -936,7 +801,7 @@ packages: deprecated: Use @eslint/config-array instead dependencies: '@humanwhocodes/object-schema': 1.2.1 - debug: 4.3.5 + debug: 4.3.6 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -971,7 +836,7 @@ packages: engines: {node: '>=6.0.0'} dependencies: '@jridgewell/set-array': 1.2.1 - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec': 1.5.0 '@jridgewell/trace-mapping': 0.3.25 /@jridgewell/resolve-uri@3.1.2: @@ -982,21 +847,21 @@ packages: resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} engines: {node: '>=6.0.0'} - /@jridgewell/sourcemap-codec@1.4.15: - resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + /@jridgewell/sourcemap-codec@1.5.0: + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} /@jridgewell/trace-mapping@0.3.25: resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} dependencies: '@jridgewell/resolve-uri': 3.1.2 - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec': 1.5.0 /@mswjs/cookies@0.2.2: resolution: {integrity: sha512-mlN83YSrcFgk7Dm1Mys40DLssI1KdJji2CMKN8eOlBqsTADYzj2+jWzsANsUTFbxDMWPD5e9bfA1RGqBpS3O1g==} engines: {node: '>=14'} dependencies: '@types/set-cookie-parser': 2.4.10 - set-cookie-parser: 2.6.0 + set-cookie-parser: 2.7.0 dev: true /@mswjs/interceptors@0.17.10: @@ -1006,7 +871,7 @@ packages: '@open-draft/until': 1.0.3 '@types/debug': 4.1.12 '@xmldom/xmldom': 0.8.10 - debug: 4.3.5 + debug: 4.3.6 headers-polyfill: 3.2.5 outvariant: 1.4.3 strict-event-emitter: 0.2.8 @@ -1047,142 +912,128 @@ packages: dev: true optional: true - /@rollup/pluginutils@5.1.0: - resolution: {integrity: 
sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==} - engines: {node: '>=14.0.0'} - peerDependencies: - rollup: ^1.20.0||^2.0.0||^3.0.0||^4.0.0 - peerDependenciesMeta: - rollup: - optional: true - dependencies: - '@types/estree': 1.0.5 - estree-walker: 2.0.2 - picomatch: 2.3.1 - dev: true - - /@rollup/rollup-android-arm-eabi@4.18.0: - resolution: {integrity: sha512-Tya6xypR10giZV1XzxmH5wr25VcZSncG0pZIjfePT0OVBvqNEurzValetGNarVrGiq66EBVAFn15iYX4w6FKgQ==} + /@rollup/rollup-android-arm-eabi@4.21.1: + resolution: {integrity: sha512-2thheikVEuU7ZxFXubPDOtspKn1x0yqaYQwvALVtEcvFhMifPADBrgRPyHV0TF3b+9BgvgjgagVyvA/UqPZHmg==} cpu: [arm] os: [android] requiresBuild: true dev: true optional: true - /@rollup/rollup-android-arm64@4.18.0: - resolution: {integrity: sha512-avCea0RAP03lTsDhEyfy+hpfr85KfyTctMADqHVhLAF3MlIkq83CP8UfAHUssgXTYd+6er6PaAhx/QGv4L1EiA==} + /@rollup/rollup-android-arm64@4.21.1: + resolution: {integrity: sha512-t1lLYn4V9WgnIFHXy1d2Di/7gyzBWS8G5pQSXdZqfrdCGTwi1VasRMSS81DTYb+avDs/Zz4A6dzERki5oRYz1g==} cpu: [arm64] os: [android] requiresBuild: true dev: true optional: true - /@rollup/rollup-darwin-arm64@4.18.0: - resolution: {integrity: sha512-IWfdwU7KDSm07Ty0PuA/W2JYoZ4iTj3TUQjkVsO/6U+4I1jN5lcR71ZEvRh52sDOERdnNhhHU57UITXz5jC1/w==} + /@rollup/rollup-darwin-arm64@4.21.1: + resolution: {integrity: sha512-AH/wNWSEEHvs6t4iJ3RANxW5ZCK3fUnmf0gyMxWCesY1AlUj8jY7GC+rQE4wd3gwmZ9XDOpL0kcFnCjtN7FXlA==} cpu: [arm64] os: [darwin] requiresBuild: true dev: true optional: true - /@rollup/rollup-darwin-x64@4.18.0: - resolution: {integrity: sha512-n2LMsUz7Ynu7DoQrSQkBf8iNrjOGyPLrdSg802vk6XT3FtsgX6JbE8IHRvposskFm9SNxzkLYGSq9QdpLYpRNA==} + /@rollup/rollup-darwin-x64@4.21.1: + resolution: {integrity: sha512-dO0BIz/+5ZdkLZrVgQrDdW7m2RkrLwYTh2YMFG9IpBtlC1x1NPNSXkfczhZieOlOLEqgXOFH3wYHB7PmBtf+Bg==} cpu: [x64] os: [darwin] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm-gnueabihf@4.18.0: - resolution: {integrity: sha512-C/zbRYRXFjWvz9Z4haRxcTdnkPt1BtCkz+7RtBSuNmKzMzp3ZxdM28Mpccn6pt28/UWUCTXa+b0Mx1k3g6NOMA==} + /@rollup/rollup-linux-arm-gnueabihf@4.21.1: + resolution: {integrity: sha512-sWWgdQ1fq+XKrlda8PsMCfut8caFwZBmhYeoehJ05FdI0YZXk6ZyUjWLrIgbR/VgiGycrFKMMgp7eJ69HOF2pQ==} cpu: [arm] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm-musleabihf@4.18.0: - resolution: {integrity: sha512-l3m9ewPgjQSXrUMHg93vt0hYCGnrMOcUpTz6FLtbwljo2HluS4zTXFy2571YQbisTnfTKPZ01u/ukJdQTLGh9A==} + /@rollup/rollup-linux-arm-musleabihf@4.21.1: + resolution: {integrity: sha512-9OIiSuj5EsYQlmwhmFRA0LRO0dRRjdCVZA3hnmZe1rEwRk11Jy3ECGGq3a7RrVEZ0/pCsYWx8jG3IvcrJ6RCew==} cpu: [arm] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm64-gnu@4.18.0: - resolution: {integrity: sha512-rJ5D47d8WD7J+7STKdCUAgmQk49xuFrRi9pZkWoRD1UeSMakbcepWXPF8ycChBoAqs1pb2wzvbY6Q33WmN2ftw==} + /@rollup/rollup-linux-arm64-gnu@4.21.1: + resolution: {integrity: sha512-0kuAkRK4MeIUbzQYu63NrJmfoUVicajoRAL1bpwdYIYRcs57iyIV9NLcuyDyDXE2GiZCL4uhKSYAnyWpjZkWow==} cpu: [arm64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm64-musl@4.18.0: - resolution: {integrity: sha512-be6Yx37b24ZwxQ+wOQXXLZqpq4jTckJhtGlWGZs68TgdKXJgw54lUUoFYrg6Zs/kjzAQwEwYbp8JxZVzZLRepQ==} + /@rollup/rollup-linux-arm64-musl@4.21.1: + resolution: {integrity: sha512-/6dYC9fZtfEY0vozpc5bx1RP4VrtEOhNQGb0HwvYNwXD1BBbwQ5cKIbUVVU7G2d5WRE90NfB922elN8ASXAJEA==} cpu: [arm64] os: [linux] requiresBuild: true dev: true optional: true - 
/@rollup/rollup-linux-powerpc64le-gnu@4.18.0: - resolution: {integrity: sha512-hNVMQK+qrA9Todu9+wqrXOHxFiD5YmdEi3paj6vP02Kx1hjd2LLYR2eaN7DsEshg09+9uzWi2W18MJDlG0cxJA==} + /@rollup/rollup-linux-powerpc64le-gnu@4.21.1: + resolution: {integrity: sha512-ltUWy+sHeAh3YZ91NUsV4Xg3uBXAlscQe8ZOXRCVAKLsivGuJsrkawYPUEyCV3DYa9urgJugMLn8Z3Z/6CeyRQ==} cpu: [ppc64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-riscv64-gnu@4.18.0: - resolution: {integrity: sha512-ROCM7i+m1NfdrsmvwSzoxp9HFtmKGHEqu5NNDiZWQtXLA8S5HBCkVvKAxJ8U+CVctHwV2Gb5VUaK7UAkzhDjlg==} + /@rollup/rollup-linux-riscv64-gnu@4.21.1: + resolution: {integrity: sha512-BggMndzI7Tlv4/abrgLwa/dxNEMn2gC61DCLrTzw8LkpSKel4o+O+gtjbnkevZ18SKkeN3ihRGPuBxjaetWzWg==} cpu: [riscv64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-s390x-gnu@4.18.0: - resolution: {integrity: sha512-0UyyRHyDN42QL+NbqevXIIUnKA47A+45WyasO+y2bGJ1mhQrfrtXUpTxCOrfxCR4esV3/RLYyucGVPiUsO8xjg==} + /@rollup/rollup-linux-s390x-gnu@4.21.1: + resolution: {integrity: sha512-z/9rtlGd/OMv+gb1mNSjElasMf9yXusAxnRDrBaYB+eS1shFm6/4/xDH1SAISO5729fFKUkJ88TkGPRUh8WSAA==} cpu: [s390x] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-x64-gnu@4.18.0: - resolution: {integrity: sha512-xuglR2rBVHA5UsI8h8UbX4VJ470PtGCf5Vpswh7p2ukaqBGFTnsfzxUBetoWBWymHMxbIG0Cmx7Y9qDZzr648w==} + /@rollup/rollup-linux-x64-gnu@4.21.1: + resolution: {integrity: sha512-kXQVcWqDcDKw0S2E0TmhlTLlUgAmMVqPrJZR+KpH/1ZaZhLSl23GZpQVmawBQGVhyP5WXIsIQ/zqbDBBYmxm5w==} cpu: [x64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-x64-musl@4.18.0: - resolution: {integrity: sha512-LKaqQL9osY/ir2geuLVvRRs+utWUNilzdE90TpyoX0eNqPzWjRm14oMEE+YLve4k/NAqCdPkGYDaDF5Sw+xBfg==} + /@rollup/rollup-linux-x64-musl@4.21.1: + resolution: {integrity: sha512-CbFv/WMQsSdl+bpX6rVbzR4kAjSSBuDgCqb1l4J68UYsQNalz5wOqLGYj4ZI0thGpyX5kc+LLZ9CL+kpqDovZA==} cpu: [x64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-arm64-msvc@4.18.0: - resolution: {integrity: sha512-7J6TkZQFGo9qBKH0pk2cEVSRhJbL6MtfWxth7Y5YmZs57Pi+4x6c2dStAUvaQkHQLnEQv1jzBUW43GvZW8OFqA==} + /@rollup/rollup-win32-arm64-msvc@4.21.1: + resolution: {integrity: sha512-3Q3brDgA86gHXWHklrwdREKIrIbxC0ZgU8lwpj0eEKGBQH+31uPqr0P2v11pn0tSIxHvcdOWxa4j+YvLNx1i6g==} cpu: [arm64] os: [win32] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-ia32-msvc@4.18.0: - resolution: {integrity: sha512-Txjh+IxBPbkUB9+SXZMpv+b/vnTEtFyfWZgJ6iyCmt2tdx0OF5WhFowLmnh8ENGNpfUlUZkdI//4IEmhwPieNg==} + /@rollup/rollup-win32-ia32-msvc@4.21.1: + resolution: {integrity: sha512-tNg+jJcKR3Uwe4L0/wY3Ro0H+u3nrb04+tcq1GSYzBEmKLeOQF2emk1whxlzNqb6MMrQ2JOcQEpuuiPLyRcSIw==} cpu: [ia32] os: [win32] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-x64-msvc@4.18.0: - resolution: {integrity: sha512-UOo5FdvOL0+eIVTgS4tIdbW+TtnBLWg1YBCcU2KWM7nuNwRz9bksDX1bekJJCpu25N1DVWaCwnT39dVQxzqS8g==} + /@rollup/rollup-win32-x64-msvc@4.21.1: + resolution: {integrity: sha512-xGiIH95H1zU7naUyTKEyOA/I0aexNMUdO9qRv0bLKN3qu25bBdrxZHqA3PTJ24YNN/GdMzG4xkDcd/GvjuhfLg==} cpu: [x64] os: [win32] requiresBuild: true @@ -1198,130 +1049,129 @@ packages: engines: {node: '>=6'} dev: true - /@svgr/babel-plugin-add-jsx-attribute@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + 
/@swc/core-darwin-arm64@1.7.21: + resolution: {integrity: sha512-hh5uOZ7jWF66z2TRMhhXtWMQkssuPCSIZPy9VHf5KvZ46cX+5UeECDthchYklEVZQyy4Qr6oxfh4qff/5spoMA==} + engines: {node: '>=10'} + cpu: [arm64] + os: [darwin] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-remove-jsx-attribute@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-darwin-x64@1.7.21: + resolution: {integrity: sha512-lTsPquqSierQ6jWiWM7NnYXXZGk9zx3NGkPLHjPbcH5BmyiauX0CC/YJYJx7YmS2InRLyALlGmidHkaF4JY28A==} + engines: {node: '>=10'} + cpu: [x64] + os: [darwin] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-remove-jsx-empty-expression@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-arm-gnueabihf@1.7.21: + resolution: {integrity: sha512-AgSd0fnSzAqCvWpzzZCq75z62JVGUkkXEOpfdi99jj/tryPy38KdXJtkVWJmufPXlRHokGTBitalk33WDJwsbA==} + engines: {node: '>=10'} + cpu: [arm] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-replace-jsx-attribute-value@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-arm64-gnu@1.7.21: + resolution: {integrity: sha512-l+jw6RQ4Y43/8dIst0c73uQE+W3kCWrCFqMqC/xIuE/iqHOnvYK6YbA1ffOct2dImkHzNiKuoehGqtQAc6cNaQ==} + engines: {node: '>=10'} + cpu: [arm64] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-svg-dynamic-title@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-arm64-musl@1.7.21: + resolution: {integrity: sha512-29KKZXrTo/c9F1JFL9WsNvCa6UCdIVhHP5EfuYhlKbn5/YmSsNFkuHdUtZFEd5U4+jiShXDmgGCtLW2d08LIwg==} + engines: {node: '>=10'} + cpu: [arm64] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-svg-em-dimensions@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-x64-gnu@1.7.21: + resolution: {integrity: sha512-HsP3JwddvQj5HvnjmOr+Bd5plEm6ccpfP5wUlm3hywzvdVkj+yR29bmD7UwpV/1zCQ60Ry35a7mXhKI6HQxFgw==} + engines: {node: '>=10'} + cpu: [x64] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-transform-react-native-svg@8.1.0(@babel/core@7.24.7): - resolution: {integrity: sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-x64-musl@1.7.21: + resolution: {integrity: 
sha512-hYKLVeUTHqvFK628DFJEwxoX6p42T3HaQ4QjNtf3oKhiJWFh9iTRUrN/oCB5YI3R9WMkFkKh+99gZ/Dd0T5lsg==} + engines: {node: '>=10'} + cpu: [x64] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-transform-svg-component@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==} - engines: {node: '>=12'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-win32-arm64-msvc@1.7.21: + resolution: {integrity: sha512-qyWAKW10aMBe6iUqeZ7NAJIswjfggVTUpDINpQGUJhz+pR71YZDidXgZXpaDB84YyDB2JAlRqd1YrLkl7CMiIw==} + engines: {node: '>=10'} + cpu: [arm64] + os: [win32] + requiresBuild: true dev: true + optional: true - /@svgr/babel-preset@8.1.0(@babel/core@7.24.7): - resolution: {integrity: sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 - '@svgr/babel-plugin-add-jsx-attribute': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-remove-jsx-attribute': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-remove-jsx-empty-expression': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-replace-jsx-attribute-value': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-svg-dynamic-title': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-svg-em-dimensions': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-transform-react-native-svg': 8.1.0(@babel/core@7.24.7) - '@svgr/babel-plugin-transform-svg-component': 8.0.0(@babel/core@7.24.7) + /@swc/core-win32-ia32-msvc@1.7.21: + resolution: {integrity: sha512-cy61wS3wgH5mEwBiQ5w6/FnQrchBDAdPsSh0dKSzNmI+4K8hDxS8uzdBycWqJXO0cc+mA77SIlwZC3hP3Kum2g==} + engines: {node: '>=10'} + cpu: [ia32] + os: [win32] + requiresBuild: true dev: true + optional: true - /@svgr/core@8.1.0(typescript@4.9.5): - resolution: {integrity: sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==} - engines: {node: '>=14'} - dependencies: - '@babel/core': 7.24.7 - '@svgr/babel-preset': 8.1.0(@babel/core@7.24.7) - camelcase: 6.3.0 - cosmiconfig: 8.3.6(typescript@4.9.5) - snake-case: 3.0.4 - transitivePeerDependencies: - - supports-color - - typescript + /@swc/core-win32-x64-msvc@1.7.21: + resolution: {integrity: sha512-/rexGItJURNJOdae+a48M+loT74nsEU+PyRRVAkZMKNRtLoYFAr0cpDlS5FodIgGunp/nqM0bst4H2w6Y05IKA==} + engines: {node: '>=10'} + cpu: [x64] + os: [win32] + requiresBuild: true dev: true + optional: true - /@svgr/hast-util-to-babel-ast@8.0.0: - resolution: {integrity: sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==} - engines: {node: '>=14'} + /@swc/core@1.7.21: + resolution: {integrity: sha512-7/cN0SZ+y2V6e0hsDD8koGR0QVh7Jl3r756bwaHLLSN+kReoUb/yVcLsA8iTn90JLME3DkQK4CPjxDCQiyMXNg==} + engines: {node: '>=10'} + requiresBuild: true + peerDependencies: + '@swc/helpers': '*' + peerDependenciesMeta: + '@swc/helpers': + optional: true dependencies: - '@babel/types': 7.24.7 - entities: 4.5.0 + '@swc/counter': 0.1.3 + '@swc/types': 0.1.12 + optionalDependencies: + '@swc/core-darwin-arm64': 1.7.21 + '@swc/core-darwin-x64': 1.7.21 + '@swc/core-linux-arm-gnueabihf': 1.7.21 + '@swc/core-linux-arm64-gnu': 1.7.21 + '@swc/core-linux-arm64-musl': 1.7.21 + '@swc/core-linux-x64-gnu': 1.7.21 + '@swc/core-linux-x64-musl': 1.7.21 + '@swc/core-win32-arm64-msvc': 1.7.21 + '@swc/core-win32-ia32-msvc': 1.7.21 + 
'@swc/core-win32-x64-msvc': 1.7.21 dev: true - /@svgr/plugin-jsx@8.1.0(@svgr/core@8.1.0): - resolution: {integrity: sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==} - engines: {node: '>=14'} - peerDependencies: - '@svgr/core': '*' + /@swc/counter@0.1.3: + resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} + dev: true + + /@swc/types@0.1.12: + resolution: {integrity: sha512-wBJA+SdtkbFhHjTMYH+dEH1y4VpfGdAc2Kw/LK09i9bXd/K6j6PkDcFCEzb6iVfZMkPRrl/q0e3toqTAJdkIVA==} dependencies: - '@babel/core': 7.24.7 - '@svgr/babel-preset': 8.1.0(@babel/core@7.24.7) - '@svgr/core': 8.1.0(typescript@4.9.5) - '@svgr/hast-util-to-babel-ast': 8.0.0 - svg-parser: 2.0.4 - transitivePeerDependencies: - - supports-color + '@swc/counter': 0.1.3 dev: true /@szmarczak/http-timer@1.1.2: @@ -1336,7 +1186,7 @@ packages: engines: {node: '>=12'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 '@types/aria-query': 5.0.4 aria-query: 5.1.3 chalk: 4.1.2 @@ -1345,12 +1195,11 @@ packages: pretty-format: 27.5.1 dev: true - /@testing-library/jest-dom@6.4.8: - resolution: {integrity: sha512-JD0G+Zc38f5MBHA4NgxQMR5XtO5Jx9g86jqturNTt2WUfRmLDIY7iKkWHDCCTiDuFMre6nxAD5wHw9W5kI4rGw==} + /@testing-library/jest-dom@6.5.0: + resolution: {integrity: sha512-xGGHpBXYSHUUr6XsKBfs85TWlYKpTc37cSBBVrXcib2MkHLboWlkClhWF37JKlDb9KEq3dHs+f2xR7XJEWGBxA==} engines: {node: '>=14', npm: '>=6', yarn: '>=1'} dependencies: '@adobe/css-tools': 4.4.0 - '@babel/runtime': 7.24.7 aria-query: 5.3.0 chalk: 3.0.0 css.escape: 1.5.1 @@ -1366,7 +1215,7 @@ packages: react: <18.0.0 react-dom: <18.0.0 dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 '@testing-library/dom': 8.20.1 '@types/react-dom': 16.8.4 react: 16.14.0 @@ -1377,35 +1226,6 @@ packages: resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} dev: true - /@types/babel__core@7.20.5: - resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} - dependencies: - '@babel/parser': 7.24.7 - '@babel/types': 7.24.7 - '@types/babel__generator': 7.6.8 - '@types/babel__template': 7.4.4 - '@types/babel__traverse': 7.20.6 - dev: true - - /@types/babel__generator@7.6.8: - resolution: {integrity: sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==} - dependencies: - '@babel/types': 7.24.7 - dev: true - - /@types/babel__template@7.4.4: - resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} - dependencies: - '@babel/parser': 7.24.7 - '@babel/types': 7.24.7 - dev: true - - /@types/babel__traverse@7.20.6: - resolution: {integrity: sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==} - dependencies: - '@babel/types': 7.24.7 - dev: true - /@types/cookie@0.4.1: resolution: {integrity: sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==} dev: true @@ -1424,13 +1244,6 @@ packages: resolution: {integrity: sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==} dev: true - /@types/history@5.0.0: - resolution: {integrity: sha512-hy8b7Y1J8OGe6LbAjj3xniQrj3v6lsivCcrmf4TzSgPzLkhIeKgc5IZnT7ReIqmEuodjfO8EYAuoFvIrHi/+jQ==} - deprecated: This is a stub types definition. 
history provides its own type definitions, so you do not need this installed. - dependencies: - history: 5.3.0 - dev: true - /@types/js-levenshtein@1.1.3: resolution: {integrity: sha512-jd+Q+sD20Qfu9e2aEXogiO3vpOC1PYJOUdyN9gvs4Qrvkg4wF43L5OhqrPeokdv8TL0/mXoYfpkcoGZMNN2pkQ==} dev: true @@ -1442,17 +1255,17 @@ packages: /@types/keyv@3.1.4: resolution: {integrity: sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==} dependencies: - '@types/node': 20.14.8 + '@types/node': 22.5.1 dev: true /@types/ms@0.7.34: resolution: {integrity: sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==} dev: true - /@types/node@20.14.8: - resolution: {integrity: sha512-DO+2/jZinXfROG7j7WKFn/3C6nFwxy2lLpgLjEXJz+0XKphZlTLJ14mo8Vfg8X5BWN6XjyESXq+LcYdT7tR3bA==} + /@types/node@22.5.1: + resolution: {integrity: sha512-KkHsxej0j9IW1KKOOAA/XBA0z08UFSrRQHErzEfA3Vgq57eXIMYboIlHJuYIfd+lwCQjtKqUu3UnmKbtUc9yRw==} dependencies: - undici-types: 5.26.5 + undici-types: 6.19.8 dev: true /@types/parse-json@4.0.2: @@ -1469,10 +1282,10 @@ packages: '@types/react': 16.8.15 dev: true - /@types/react-router-dom@4.3.5: - resolution: {integrity: sha512-eFajSUASYbPHg2BDM1G8Btx+YqGgvROPIg6sBhl3O4kbDdYXdFdfrgQFf/pcBuQVObjfT9AL/dd15jilR5DIEA==} + /@types/react-router-dom@5.3.3: + resolution: {integrity: sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==} dependencies: - '@types/history': 5.0.0 + '@types/history': 4.7.11 '@types/react': 16.8.15 '@types/react-router': 5.1.20 dev: true @@ -1489,11 +1302,11 @@ packages: dependencies: '@types/react': 16.8.15 '@types/react-dom': 16.8.4 - '@types/react-transition-group': 4.4.10 + '@types/react-transition-group': 4.4.11 dev: true - /@types/react-transition-group@4.4.10: - resolution: {integrity: sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q==} + /@types/react-transition-group@4.4.11: + resolution: {integrity: sha512-RM05tAniPZ5DZPzzNFP+DmrcOdD0efDUxMy3145oljWSl3x9ZV5vhme98gTxFrj2lhXvmGNnUiuDyJgY9IKkNA==} dependencies: '@types/react': 16.8.15 dev: true @@ -1508,7 +1321,7 @@ packages: /@types/responselike@1.0.3: resolution: {integrity: sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw==} dependencies: - '@types/node': 20.14.8 + '@types/node': 22.5.1 dev: true /@types/semver@7.5.8: @@ -1518,7 +1331,7 @@ packages: /@types/set-cookie-parser@2.4.10: resolution: {integrity: sha512-GGmQVGpQWUe5qglJozEjZV/5dyxbOOZ0LHe/lqyWssB88Y4svNfst0uqBVscdDeIKl5Jy5+aPSvy7mI9tYRguw==} dependencies: - '@types/node': 20.14.8 + '@types/node': 22.5.1 dev: true /@typescript-eslint/eslint-plugin@5.62.0(@typescript-eslint/parser@5.62.0)(eslint@7.32.0)(typescript@4.9.5): @@ -1532,17 +1345,17 @@ packages: typescript: optional: true dependencies: - '@eslint-community/regexpp': 4.10.1 + '@eslint-community/regexpp': 4.11.0 '@typescript-eslint/parser': 5.62.0(eslint@7.32.0)(typescript@4.9.5) '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/type-utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) '@typescript-eslint/utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) - debug: 4.3.5 + debug: 4.3.6 eslint: 7.32.0 graphemer: 1.4.0 - ignore: 5.3.1 + ignore: 5.3.2 natural-compare-lite: 1.4.0 - semver: 7.6.2 + semver: 7.6.3 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 transitivePeerDependencies: @@ -1562,7 +1375,7 @@ packages: '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/types': 5.62.0 
'@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) - debug: 4.3.5 + debug: 4.3.6 eslint: 7.32.0 typescript: 4.9.5 transitivePeerDependencies: @@ -1589,7 +1402,7 @@ packages: dependencies: '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) '@typescript-eslint/utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) - debug: 4.3.5 + debug: 4.3.6 eslint: 7.32.0 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 @@ -1613,10 +1426,10 @@ packages: dependencies: '@typescript-eslint/types': 5.62.0 '@typescript-eslint/visitor-keys': 5.62.0 - debug: 4.3.5 + debug: 4.3.6 globby: 11.1.0 is-glob: 4.0.3 - semver: 7.6.2 + semver: 7.6.3 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 transitivePeerDependencies: @@ -1637,7 +1450,7 @@ packages: '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) eslint: 7.32.0 eslint-scope: 5.1.1 - semver: 7.6.2 + semver: 7.6.3 transitivePeerDependencies: - supports-color - typescript @@ -1651,20 +1464,15 @@ packages: eslint-visitor-keys: 3.4.3 dev: true - /@vitejs/plugin-react@4.3.1(vite@4.5.3): - resolution: {integrity: sha512-m/V2syj5CuVnaxcUJOQRel/Wr31FFXRFlnOoq1TVtkCxsY5veGMTEmpWHndrhB2U8ScHtCQB1e+4hWYExQc6Lg==} - engines: {node: ^14.18.0 || >=16.0.0} + /@vitejs/plugin-react-swc@3.7.0(vite@4.5.5): + resolution: {integrity: sha512-yrknSb3Dci6svCd/qhHqhFPDSw0QtjumcqdKMoNNzmOl5lMXTTiqzjWtG4Qask2HdvvzaNgSunbQGet8/GrKdA==} peerDependencies: - vite: ^4.2.0 || ^5.0.0 - dependencies: - '@babel/core': 7.24.7 - '@babel/plugin-transform-react-jsx-self': 7.24.7(@babel/core@7.24.7) - '@babel/plugin-transform-react-jsx-source': 7.24.7(@babel/core@7.24.7) - '@types/babel__core': 7.20.5 - react-refresh: 0.14.2 - vite: 4.5.3(less@3.13.1) + vite: ^4 || ^5 + dependencies: + '@swc/core': 1.7.21 + vite: 4.5.5(less@3.13.1) transitivePeerDependencies: - - supports-color + - '@swc/helpers' dev: true /@vitest/expect@1.6.0: @@ -1672,7 +1480,7 @@ packages: dependencies: '@vitest/spy': 1.6.0 '@vitest/utils': 1.6.0 - chai: 4.4.1 + chai: 4.5.0 dev: true /@vitest/runner@1.6.0: @@ -1686,7 +1494,7 @@ packages: /@vitest/snapshot@1.6.0: resolution: {integrity: sha512-+Hx43f8Chus+DCmygqqfetcAZrDJwvTj0ymqjQq4CvmpKFSTVteEOBzCusu1x2tt4OJcvBflyHUE0DZSLgEMtQ==} dependencies: - magic-string: 0.30.10 + magic-string: 0.30.11 pathe: 1.1.2 pretty-format: 29.7.0 dev: true @@ -1737,7 +1545,7 @@ packages: resolution: {integrity: sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==} engines: {node: '>=0.4.0'} dependencies: - acorn: 8.12.0 + acorn: 8.12.1 dev: true /acorn@7.4.1: @@ -1746,8 +1554,8 @@ packages: hasBin: true dev: true - /acorn@8.12.0: - resolution: {integrity: sha512-RTvkC4w+KNXrM39/lWCUaG0IbRkWdCv7W/IOW9oU6SawyxulvkQy5HQPVTKxEjczcUvapcrw3cFx/60VN/NRNw==} + /acorn@8.12.1: + resolution: {integrity: sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==} engines: {node: '>=0.4.0'} hasBin: true dev: true @@ -1773,7 +1581,7 @@ packages: resolution: {integrity: sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==} engines: {node: '>= 14'} dependencies: - debug: 4.3.5 + debug: 4.3.6 transitivePeerDependencies: - supports-color dev: true @@ -1787,13 +1595,13 @@ packages: uri-js: 4.4.1 dev: true - /ajv@8.16.0: - resolution: {integrity: sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==} + /ajv@8.17.1: + resolution: {integrity: 
sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==} dependencies: fast-deep-equal: 3.1.3 + fast-uri: 3.0.1 json-schema-traverse: 1.0.0 require-from-string: 2.0.2 - uri-js: 4.4.1 dev: true /ansi-align@3.0.1: @@ -1866,7 +1674,7 @@ packages: '@ant-design/colors': 5.1.1 '@ant-design/icons': 4.8.3(react-dom@16.14.0)(react@16.14.0) '@ant-design/react-slick': 0.28.4(react@16.14.0) - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 array-tree-filter: 2.1.0 classnames: 2.5.1 copy-to-clipboard: 3.3.3 @@ -1926,10 +1734,6 @@ packages: sprintf-js: 1.0.3 dev: true - /argparse@2.0.1: - resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} - dev: true - /aria-query@5.1.3: resolution: {integrity: sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==} dependencies: @@ -2015,8 +1819,8 @@ packages: resolution: {integrity: sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==} dev: true - /aws4@1.13.0: - resolution: {integrity: sha512-3AungXC4I8kKsS9PuS4JH2nc+0bVY/mjgrephHTIi8fpEeGsTHBUJeosp0Wc1myYMElmD0B3Oc4XL/HVJ4PV2g==} + /aws4@1.13.2: + resolution: {integrity: sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==} dev: true /axios@0.28.1: @@ -2049,7 +1853,7 @@ packages: /babel-plugin-macros@2.8.0: resolution: {integrity: sha512-SEP5kJpfGYqYKpBrj5XU3ahw5p5GOHJ0U5ssOSQ/WBVdwkD2Dzlce95exQTs3jOVWPPKLBN2rlEWkCK7dSmLvg==} dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 cosmiconfig: 6.0.0 resolve: 1.22.8 dev: false @@ -2092,8 +1896,8 @@ packages: readable-stream: 3.6.2 dev: true - /body-parser@1.20.2: - resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} + /body-parser@1.20.3: + resolution: {integrity: sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 @@ -2104,7 +1908,7 @@ packages: http-errors: 2.0.0 iconv-lite: 0.4.24 on-finished: 2.4.1 - qs: 6.11.0 + qs: 6.13.0 raw-body: 2.5.2 type-is: 1.6.18 unpipe: 1.0.0 @@ -2146,17 +1950,6 @@ packages: fill-range: 7.1.1 dev: true - /browserslist@4.23.1: - resolution: {integrity: sha512-TUfofFo/KsK/bWZ9TWQ5O26tsWW4Uhmt8IYklbnUa70udB6P2wA7w7o4PY4muaEPBQaAX+CEnmmIA41NVHtPVw==} - engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} - hasBin: true - dependencies: - caniuse-lite: 1.0.30001636 - electron-to-chromium: 1.4.810 - node-releases: 2.0.14 - update-browserslist-db: 1.0.16(browserslist@4.23.1) - dev: true - /buffer@5.7.1: resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} dependencies: @@ -2212,21 +2005,12 @@ packages: engines: {node: '>=6'} dev: true - /camelcase@6.3.0: - resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} - engines: {node: '>=10'} - dev: true - - /caniuse-lite@1.0.30001636: - resolution: {integrity: sha512-bMg2vmr8XBsbL6Lr0UHXy/21m84FTxDLWn2FSqMd5PrlbMxwJlQnC2YWYxVgp66PZE+BBNF2jYQUBKCo1FDeZg==} - dev: true - /caseless@0.12.0: resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} dev: true - /chai@4.4.1: - resolution: {integrity: 
sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==} + /chai@4.5.0: + resolution: {integrity: sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==} engines: {node: '>=4'} dependencies: assertion-error: 1.1.0 @@ -2235,7 +2019,7 @@ packages: get-func-name: 2.0.2 loupe: 2.3.7 pathval: 1.1.1 - type-detect: 4.0.8 + type-detect: 4.1.0 dev: true /chalk@2.4.2: @@ -2379,7 +2163,7 @@ packages: resolution: {integrity: sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==} engines: {node: '>= 0.6'} dependencies: - mime-db: 1.52.0 + mime-db: 1.53.0 dev: true /compression@1.7.4: @@ -2441,10 +2225,6 @@ packages: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} dev: false - /convert-source-map@2.0.0: - resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} - dev: true - /cookie-signature@1.0.6: resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} dev: true @@ -2493,22 +2273,6 @@ packages: yaml: 1.10.2 dev: false - /cosmiconfig@8.3.6(typescript@4.9.5): - resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} - engines: {node: '>=14'} - peerDependencies: - typescript: '>=4.9.5' - peerDependenciesMeta: - typescript: - optional: true - dependencies: - import-fresh: 3.3.0 - js-yaml: 4.1.0 - parse-json: 5.2.0 - path-type: 4.0.0 - typescript: 4.9.5 - dev: true - /cross-spawn@5.1.0: resolution: {integrity: sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==} dependencies: @@ -2606,11 +2370,11 @@ packages: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 dev: false - /dayjs@1.11.11: - resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==} + /dayjs@1.11.13: + resolution: {integrity: sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==} dev: false /debug@2.6.9: @@ -2635,8 +2399,8 @@ packages: ms: 2.0.0 dev: true - /debug@4.3.5: - resolution: {integrity: sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==} + /debug@4.3.6: + resolution: {integrity: sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -2666,7 +2430,7 @@ packages: resolution: {integrity: sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==} engines: {node: '>=6'} dependencies: - type-detect: 4.0.8 + type-detect: 4.1.0 dev: true /deep-equal@2.2.3: @@ -2783,17 +2547,10 @@ packages: /dom-helpers@5.2.1: resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==} dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 csstype: 3.1.3 dev: false - /dot-case@3.0.4: - resolution: {integrity: sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==} - dependencies: - no-case: 3.0.4 - tslib: 2.6.3 - dev: true - /dot-prop@4.2.1: resolution: {integrity: 
sha512-l0p4+mIuJIua0mhxGoh4a+iNL9bmeK5DvnSVQa6T0OhrVmaEa1XScX5Etc673FePCJOArq/4Pa2cLGODUWTPOQ==} engines: {node: '>=4'} @@ -2816,21 +2573,17 @@ packages: safer-buffer: 2.1.2 dev: true - /echarts@5.5.0: - resolution: {integrity: sha512-rNYnNCzqDAPCr4m/fqyUFv7fD9qIsd50S6GDFgO1DxZhncCsNsG7IfUlAlvZe5oSEQxtsjnHiUuppzccry93Xw==} + /echarts@5.5.1: + resolution: {integrity: sha512-Fce8upazaAXUVUVsjgV6mBnGuqgO+JNDlcgF79Dksy4+wgGpQB2lmYoO4TSweFg/mZITdpGHomw/cNBJZj1icA==} dependencies: tslib: 2.3.0 - zrender: 5.5.0 + zrender: 5.6.0 dev: false /ee-first@1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} dev: true - /electron-to-chromium@1.4.810: - resolution: {integrity: sha512-Kaxhu4T7SJGpRQx99tq216gCq2nMxJo+uuT6uzz9l8TVN2stL7M06MIIXAtr9jsrLs2Glflgf2vMQRepxawOdQ==} - dev: true - /emoji-regex@7.0.3: resolution: {integrity: sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==} dev: true @@ -2848,6 +2601,11 @@ packages: engines: {node: '>= 0.8'} dev: true + /encodeurl@2.0.0: + resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} + engines: {node: '>= 0.8'} + dev: true + /end-of-stream@1.4.4: resolution: {integrity: sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==} dependencies: @@ -3137,7 +2895,7 @@ packages: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.3 - debug: 4.3.5 + debug: 4.3.6 doctrine: 3.0.0 enquirer: 2.4.1 escape-string-regexp: 4.0.0 @@ -3145,7 +2903,7 @@ packages: eslint-utils: 2.1.0 eslint-visitor-keys: 2.1.0 espree: 7.3.1 - esquery: 1.5.0 + esquery: 1.6.0 esutils: 2.0.3 fast-deep-equal: 3.1.3 file-entry-cache: 6.0.1 @@ -3165,7 +2923,7 @@ packages: optionator: 0.9.4 progress: 2.0.3 regexpp: 3.2.0 - semver: 7.6.2 + semver: 7.6.3 strip-ansi: 6.0.1 strip-json-comments: 3.1.1 table: 6.8.2 @@ -3190,8 +2948,8 @@ packages: hasBin: true dev: true - /esquery@1.5.0: - resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} + /esquery@1.6.0: + resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} engines: {node: '>=0.10'} dependencies: estraverse: 5.3.0 @@ -3214,10 +2972,6 @@ packages: engines: {node: '>=4.0'} dev: true - /estree-walker@2.0.2: - resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} - dev: true - /estree-walker@3.0.3: resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} dependencies: @@ -3270,42 +3024,42 @@ packages: /express-urlrewrite@1.4.0: resolution: {integrity: sha512-PI5h8JuzoweS26vFizwQl6UTF25CAHSggNv0J25Dn/IKZscJHWZzPrI5z2Y2jgOzIaw2qh8l6+/jUcig23Z2SA==} dependencies: - debug: 4.3.5 + debug: 4.3.6 path-to-regexp: 1.8.0 transitivePeerDependencies: - supports-color dev: true - /express@4.19.2: - resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} + /express@4.21.0: + resolution: {integrity: sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.2 + body-parser: 1.20.3 content-disposition: 0.5.4 content-type: 1.0.5 cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9 depd: 2.0.0 - 
encodeurl: 1.0.2 + encodeurl: 2.0.0 escape-html: 1.0.3 etag: 1.8.1 - finalhandler: 1.2.0 + finalhandler: 1.3.1 fresh: 0.5.2 http-errors: 2.0.0 - merge-descriptors: 1.0.1 + merge-descriptors: 1.0.3 methods: 1.1.2 on-finished: 2.4.1 parseurl: 1.3.3 - path-to-regexp: 0.1.7 + path-to-regexp: 0.1.10 proxy-addr: 2.0.7 - qs: 6.11.0 + qs: 6.13.0 range-parser: 1.2.1 safe-buffer: 5.2.1 - send: 0.18.0 - serve-static: 1.15.0 + send: 0.19.0 + serve-static: 1.16.2 setprototypeof: 1.2.0 statuses: 2.0.1 type-is: 1.6.18 @@ -3349,7 +3103,7 @@ packages: '@nodelib/fs.walk': 1.2.8 glob-parent: 5.1.2 merge2: 1.4.1 - micromatch: 4.0.7 + micromatch: 4.0.8 dev: true /fast-json-stable-stringify@2.1.0: @@ -3360,6 +3114,10 @@ packages: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} dev: true + /fast-uri@3.0.1: + resolution: {integrity: sha512-MWipKbbYiYI0UC7cl8m/i/IWTqfC8YXsqjzybjddLsFjStroQzsHXkc73JutMvBiXmOvapk+axIl79ig5t55Bw==} + dev: true + /fastq@1.17.1: resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==} dependencies: @@ -3392,12 +3150,12 @@ packages: to-regex-range: 5.0.1 dev: true - /finalhandler@1.2.0: - resolution: {integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==} + /finalhandler@1.3.1: + resolution: {integrity: sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==} engines: {node: '>= 0.8'} dependencies: debug: 2.6.9 - encodeurl: 1.0.2 + encodeurl: 2.0.0 escape-html: 1.0.3 on-finished: 2.4.1 parseurl: 1.3.3 @@ -3447,8 +3205,8 @@ packages: is-callable: 1.2.7 dev: true - /foreground-child@3.2.1: - resolution: {integrity: sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==} + /foreground-child@3.3.0: + resolution: {integrity: sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==} engines: {node: '>=14'} dependencies: cross-spawn: 7.0.3 @@ -3519,11 +3277,6 @@ packages: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} dev: true - /gensync@1.0.0-beta.2: - resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} - engines: {node: '>=6.9.0'} - dev: true - /get-caller-file@2.0.5: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} @@ -3594,14 +3347,13 @@ packages: resolution: {integrity: sha512-m5blUd3/OqDTWwzBBtWBPrGlAzatRywHameHeekAZyZrskYouOGdNB8T/q6JucucvJXtOuyHIn0/Yia7iDasDw==} dev: true - /glob@10.4.2: - resolution: {integrity: sha512-GwMlUF6PkPo3Gk21UxkCohOv0PLcIXVtKyLlpEI28R/cO/4eNOdmLk3CMW1wROV/WR/EsZOWAfBbBOqYvs88/w==} - engines: {node: '>=16 || 14 >=14.18'} + /glob@10.4.5: + resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} hasBin: true dependencies: - foreground-child: 3.2.1 - jackspeak: 3.4.0 - minimatch: 9.0.4 + foreground-child: 3.3.0 + jackspeak: 3.4.3 + minimatch: 9.0.5 minipass: 7.1.2 package-json-from-dist: 1.0.0 path-scurry: 1.11.1 @@ -3629,6 +3381,7 @@ packages: /globals@11.12.0: resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} engines: {node: '>=4'} + dev: false /globals@13.24.0: 
resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} @@ -3652,7 +3405,7 @@ packages: array-union: 2.1.0 dir-glob: 3.0.1 fast-glob: 3.3.2 - ignore: 5.3.1 + ignore: 5.3.2 merge2: 1.4.1 slash: 3.0.0 dev: true @@ -3766,7 +3519,7 @@ packages: /history@4.10.1: resolution: {integrity: sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==} dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 loose-envify: 1.4.0 resolve-pathname: 3.0.0 tiny-invariant: 1.3.3 @@ -3774,12 +3527,6 @@ packages: value-equal: 1.0.1 dev: false - /history@5.3.0: - resolution: {integrity: sha512-ZqaKwjjrAYUYfLG+htGaIIZ4nioX2L70ZUMIFysS3xvBsSG4x/n1V6TXV3N8ZYNuFGlDirFg32T7B6WOUPDYcQ==} - dependencies: - '@babel/runtime': 7.24.7 - dev: true - /hoist-non-react-statics@3.3.2: resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} dependencies: @@ -3817,7 +3564,7 @@ packages: engines: {node: '>= 14'} dependencies: agent-base: 7.1.1 - debug: 4.3.5 + debug: 4.3.6 transitivePeerDependencies: - supports-color dev: true @@ -3836,7 +3583,7 @@ packages: engines: {node: '>= 14'} dependencies: agent-base: 7.1.1 - debug: 4.3.5 + debug: 4.3.6 transitivePeerDependencies: - supports-color dev: true @@ -3869,8 +3616,8 @@ packages: engines: {node: '>= 4'} dev: true - /ignore@5.3.1: - resolution: {integrity: sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==} + /ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} engines: {node: '>= 4'} dev: true @@ -4006,8 +3753,8 @@ packages: ci-info: 2.0.0 dev: true - /is-core-module@2.14.0: - resolution: {integrity: sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==} + /is-core-module@2.15.1: + resolution: {integrity: sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==} engines: {node: '>= 0.4'} dependencies: hasown: 2.0.2 @@ -4220,9 +3967,8 @@ packages: resolution: {integrity: sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==} dev: true - /jackspeak@3.4.0: - resolution: {integrity: sha512-JVYhQnN59LVPFCEcVa2C3CrEKYacvjRfqIQl+h8oi91aLYQVWRYbxjPcv1bUiUy/kLmQaANrYfNMCO3kuEDHfw==} - engines: {node: '>=14'} + /jackspeak@3.4.3: + resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} dependencies: '@isaacs/cliui': 8.0.2 optionalDependencies: @@ -4253,19 +3999,12 @@ packages: esprima: 4.0.1 dev: true - /js-yaml@4.1.0: - resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} - hasBin: true - dependencies: - argparse: 2.0.1 - dev: true - /jsbn@0.1.1: resolution: {integrity: sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==} dev: true - /jsdom@24.1.1: - resolution: {integrity: sha512-5O1wWV99Jhq4DV7rCLIoZ/UIhyQeDR7wHVyZAHAshbrvZsLs+Xzz7gtwnlJTJDjleiTKh54F4dXrX70vJQTyJQ==} + /jsdom@24.1.3: + resolution: {integrity: sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==} engines: {node: '>=18'} peerDependencies: canvas: ^2.11.2 @@ -4304,6 +4043,7 @@ packages: resolution: {integrity: 
sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==} engines: {node: '>=4'} hasBin: true + dev: false /json-buffer@3.0.0: resolution: {integrity: sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==} @@ -4319,6 +4059,7 @@ packages: /json-parse-even-better-errors@2.3.1: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + dev: false /json-parse-helpfulerror@1.0.3: resolution: {integrity: sha512-XgP0FGR77+QhUxjXkwOMkC94k3WtqEBfcnjWqhRd82qTat4SWKRE+9kUnynz/shm3I4ea2+qISvTIeGTNU7kJg==} @@ -4343,13 +4084,13 @@ packages: engines: {node: '>=8'} hasBin: true dependencies: - body-parser: 1.20.2 + body-parser: 1.20.3 chalk: 2.4.2 compression: 1.7.4 connect-pause: 0.1.1 cors: 2.8.5 errorhandler: 1.5.1 - express: 4.19.2 + express: 4.21.0 express-urlrewrite: 1.4.0 json-parse-helpfulerror: 1.0.3 lodash: 4.17.21 @@ -4431,7 +4172,7 @@ packages: image-size: 0.5.5 make-dir: 2.1.0 mime: 1.6.0 - native-request: 1.1.0 + native-request: 1.1.2 source-map: 0.6.1 /levn@0.4.1: @@ -4460,7 +4201,7 @@ packages: engines: {node: '>=14'} dependencies: mlly: 1.7.1 - pkg-types: 1.1.1 + pkg-types: 1.2.0 dev: true /locate-path@3.0.0: @@ -4518,12 +4259,6 @@ packages: steno: 0.4.4 dev: true - /lower-case@2.0.2: - resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} - dependencies: - tslib: 2.6.3 - dev: true - /lowercase-keys@1.0.1: resolution: {integrity: sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==} engines: {node: '>=0.10.0'} @@ -4534,9 +4269,8 @@ packages: engines: {node: '>=8'} dev: true - /lru-cache@10.2.2: - resolution: {integrity: sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==} - engines: {node: 14 || >=16.14} + /lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} dev: true /lru-cache@4.1.5: @@ -4546,21 +4280,15 @@ packages: yallist: 2.1.2 dev: true - /lru-cache@5.1.1: - resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} - dependencies: - yallist: 3.1.1 - dev: true - /lz-string@1.5.0: resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} hasBin: true dev: true - /magic-string@0.30.10: - resolution: {integrity: sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==} + /magic-string@0.30.11: + resolution: {integrity: sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==} dependencies: - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec': 1.5.0 dev: true /make-dir@1.3.0: @@ -4593,8 +4321,8 @@ packages: engines: {node: '>= 0.10.0'} dev: true - /merge-descriptors@1.0.1: - resolution: {integrity: sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==} + /merge-descriptors@1.0.3: + resolution: {integrity: sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==} dev: true /merge-stream@2.0.0: @@ -4623,8 +4351,8 @@ packages: engines: {node: '>= 0.6'} dev: true - /micromatch@4.0.7: - resolution: {integrity: sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==} + 
/micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} engines: {node: '>=8.6'} dependencies: braces: 3.0.3 @@ -4635,6 +4363,11 @@ packages: resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} engines: {node: '>= 0.6'} + /mime-db@1.53.0: + resolution: {integrity: sha512-oHlN/w+3MQ3rba9rqFr6V/ypF10LSkdwUysQL7GkXoTgIWeV+tcXGA852TBxH+gsh8UWoyhR1hKcoMJTuWflpg==} + engines: {node: '>= 0.6'} + dev: true + /mime-types@2.1.35: resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} engines: {node: '>= 0.6'} @@ -4684,8 +4417,8 @@ packages: brace-expansion: 1.1.11 dev: true - /minimatch@9.0.4: - resolution: {integrity: sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==} + /minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} engines: {node: '>=16 || 14 >=14.17'} dependencies: brace-expansion: 2.0.1 @@ -4703,10 +4436,10 @@ packages: /mlly@1.7.1: resolution: {integrity: sha512-rrVRZRELyQzrIUAVMHxP97kv+G786pHmOKzuFII8zDYahFBS7qnHh2AlYSl1GAHhaMPCz6/oHjVMcfFYgFYHgA==} dependencies: - acorn: 8.12.0 + acorn: 8.12.1 pathe: 1.1.2 - pkg-types: 1.1.1 - ufo: 1.5.3 + pkg-types: 1.2.0 + ufo: 1.5.4 dev: true /moment@2.30.1: @@ -4795,8 +4528,8 @@ packages: hasBin: true dev: true - /native-request@1.1.0: - resolution: {integrity: sha512-uZ5rQaeRn15XmpgE0xoPL8YWqcX90VtCFglYwAgkvKM5e8fog+vePLAhHxuuv/gRkrQxIeh5U3q9sMNUrENqWw==} + /native-request@1.1.2: + resolution: {integrity: sha512-/etjwrK0J4Ebbcnt35VMWnfiUX/B04uwGJxyJInagxDqf2z5drSt/lsOvEMWGYunz1kaLZAFrV4NDAbOoDKvAQ==} requiresBuild: true optional: true @@ -4817,13 +4550,6 @@ packages: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} dev: true - /no-case@3.0.4: - resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==} - dependencies: - lower-case: 2.0.2 - tslib: 2.6.3 - dev: true - /node-fetch@2.7.0: resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} engines: {node: 4.x || >=6.0.0} @@ -4836,10 +4562,6 @@ packages: whatwg-url: 5.0.0 dev: true - /node-releases@2.0.14: - resolution: {integrity: sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==} - dev: true - /normalize-package-data@2.5.0: resolution: {integrity: sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} dependencies: @@ -5025,7 +4747,7 @@ packages: resolution: {integrity: sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==} engines: {node: '>=18'} dependencies: - yocto-queue: 1.0.0 + yocto-queue: 1.1.1 dev: true /p-locate@3.0.0: @@ -5076,6 +4798,7 @@ packages: error-ex: 1.3.2 json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 + dev: false /parse-ms@2.1.0: resolution: {integrity: sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA==} @@ -5129,12 +4852,12 @@ packages: resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} engines: {node: '>=16 || 14 >=14.18'} dependencies: - lru-cache: 10.2.2 + lru-cache: 10.4.3 
minipass: 7.1.2 dev: true - /path-to-regexp@0.1.7: - resolution: {integrity: sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==} + /path-to-regexp@0.1.10: + resolution: {integrity: sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==} dev: true /path-to-regexp@1.8.0: @@ -5199,8 +4922,8 @@ packages: engines: {node: '>= 6'} dev: true - /pkg-types@1.1.1: - resolution: {integrity: sha512-ko14TjmDuQJ14zsotODv7dBlwxKhUKQEhuhmbqo1uCi9BB0Z2alo/wAXg6q1dTR5TyuqYyWhjtfe/Tsh+X28jQ==} + /pkg-types@1.2.0: + resolution: {integrity: sha512-+ifYuSSqOQ8CqP4MbZA5hDpb97n3E8SVWdJe+Wms9kj745lmd3b7EZJiqvmLwAlmRfjrI7Hi5z3kdBJ93lFNPA==} dependencies: confbox: 0.1.7 mlly: 1.7.1 @@ -5223,8 +4946,8 @@ packages: engines: {node: '>= 0.4'} dev: true - /postcss@8.4.38: - resolution: {integrity: sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==} + /postcss@8.4.41: + resolution: {integrity: sha512-TesUflQ0WKZqAvg52PWL6kHgLKP6xB6heTOdoYM0Wt2UHyxNa4K25EZZMgKns3BH1RLVbZCREPpLY0rhnNoHVQ==} engines: {node: ^10 || ^12 || >=14} dependencies: nanoid: 3.3.7 @@ -5329,8 +5052,8 @@ packages: engines: {node: '>=6'} dev: true - /qs@6.11.0: - resolution: {integrity: sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==} + /qs@6.13.0: + resolution: {integrity: sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==} engines: {node: '>=0.6'} dependencies: side-channel: 1.0.6 @@ -5370,7 +5093,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 dom-align: 1.12.4 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5385,7 +5108,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 array-tree-filter: 2.1.0 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5400,7 +5123,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5412,7 +5135,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5427,7 +5150,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5441,7 +5164,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5454,7 +5177,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5468,7 +5191,7 @@ packages: react: '>= 16.9.0' react-dom: '>= 16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 async-validator: 3.5.2 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5481,7 +5204,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 
7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-dialog: 8.5.3(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5495,7 +5218,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5508,7 +5231,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-menu: 8.10.8(react-dom@16.14.0)(react@16.14.0) rc-textarea: 0.3.7(react-dom@16.14.0)(react@16.14.0) @@ -5524,7 +5247,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 mini-store: 3.0.6(react-dom@16.14.0)(react@16.14.0) rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) @@ -5542,7 +5265,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5556,7 +5279,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5570,7 +5293,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5584,7 +5307,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5597,10 +5320,10 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 date-fns: 2.30.0 - dayjs: 1.11.11 + dayjs: 1.11.13 moment: 2.30.1 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5615,7 +5338,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5628,7 +5351,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5641,7 +5364,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5656,13 +5379,13 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-overflow: 1.3.2(react-dom@16.14.0)(react@16.14.0) rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) - rc-virtual-list: 3.14.3(react-dom@16.14.0)(react@16.14.0) + rc-virtual-list: 3.14.5(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) dev: false @@ -5674,7 +5397,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-tooltip: 
5.0.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5690,7 +5413,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5703,7 +5426,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5717,7 +5440,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5733,7 +5456,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-dropdown: 3.2.5(react-dom@16.14.0)(react@16.14.0) rc-menu: 8.10.8(react-dom@16.14.0)(react@16.14.0) @@ -5749,7 +5472,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5764,7 +5487,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5776,7 +5499,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-select: 12.1.13(react-dom@16.14.0)(react@16.14.0) rc-tree: 4.1.5(react-dom@16.14.0)(react@16.14.0) @@ -5792,11 +5515,11 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) - rc-virtual-list: 3.14.3(react-dom@16.14.0)(react@16.14.0) + rc-virtual-list: 3.14.5(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) dev: false @@ -5808,7 +5531,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-align: 4.0.15(react-dom@16.14.0)(react@16.14.0) rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) @@ -5823,7 +5546,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5836,20 +5559,20 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) react-is: 18.3.1 dev: false - /rc-virtual-list@3.14.3(react-dom@16.14.0)(react@16.14.0): - resolution: {integrity: sha512-6+6wiEhdqakNBnbRJymgMlh+90qpkgqherTRo1l1cX7mK6F9hWsazPczmP0lA+64yhC9/t+M9Dh5pjvDWimn8A==} + /rc-virtual-list@3.14.5(react-dom@16.14.0)(react@16.14.0): + resolution: {integrity: sha512-ZMOnkCLv2wUN8Jz7yI4XiSLa9THlYvf00LuMhb1JlsQCewuU7ydPuHw1rGVPhe9VZYl/5UqODtNd7QKJ2DMGfg==} engines: {node: '>=8.x'} peerDependencies: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 
5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5897,17 +5620,12 @@ packages: /react-is@18.3.1: resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} - /react-refresh@0.14.2: - resolution: {integrity: sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==} - engines: {node: '>=0.10.0'} - dev: true - /react-router-dom@5.3.4(react@16.14.0): resolution: {integrity: sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==} peerDependencies: react: '>=15' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 history: 4.10.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -5922,7 +5640,7 @@ packages: peerDependencies: react: '>=15' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 history: 4.10.1 hoist-non-react-statics: 3.3.2 loose-envify: 1.4.0 @@ -5940,7 +5658,7 @@ packages: react: ^16.8.0 || ^17.0.0 react-dom: ^16.8.0 || ^17.0.0 dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 '@emotion/cache': 10.0.29 '@emotion/core': 10.3.1(react@16.14.0) '@emotion/css': 10.0.27 @@ -5960,7 +5678,7 @@ packages: react: '>=16.6.0' react-dom: '>=16.6.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.6 dom-helpers: 5.2.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -6057,7 +5775,7 @@ packages: deprecated: request has been deprecated, see https://github.com/request/request/issues/3142 dependencies: aws-sign2: 0.7.0 - aws4: 1.13.0 + aws4: 1.13.2 caseless: 0.12.0 combined-stream: 1.0.8 extend: 3.0.2 @@ -6112,7 +5830,7 @@ packages: resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} hasBin: true dependencies: - is-core-module: 2.14.0 + is-core-module: 2.15.1 path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 @@ -6143,37 +5861,37 @@ packages: glob: 7.2.3 dev: true - /rollup@3.29.4: - resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==} + /rollup@3.29.5: + resolution: {integrity: sha512-GVsDdsbJzzy4S/v3dqWPJ7EfvZJfCHiDqe80IyrF59LYuP+e6U1LJoUqeuqRbwAWoMNoXivMNeNAOf5E22VA1w==} engines: {node: '>=14.18.0', npm: '>=8.0.0'} hasBin: true optionalDependencies: fsevents: 2.3.3 dev: true - /rollup@4.18.0: - resolution: {integrity: sha512-QmJz14PX3rzbJCN1SG4Xe/bAAX2a6NpCP8ab2vfu2GiUr8AQcr2nCV/oEO3yneFarB67zk8ShlIyWb2LGTb3Sg==} + /rollup@4.21.1: + resolution: {integrity: sha512-ZnYyKvscThhgd3M5+Qt3pmhO4jIRR5RGzaSovB6Q7rGNrK5cUncrtLmcTTJVSdcKXyZjW8X8MB0JMSuH9bcAJg==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true dependencies: '@types/estree': 1.0.5 optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.18.0 - '@rollup/rollup-android-arm64': 4.18.0 - '@rollup/rollup-darwin-arm64': 4.18.0 - '@rollup/rollup-darwin-x64': 4.18.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.18.0 - '@rollup/rollup-linux-arm-musleabihf': 4.18.0 - '@rollup/rollup-linux-arm64-gnu': 4.18.0 - '@rollup/rollup-linux-arm64-musl': 4.18.0 - '@rollup/rollup-linux-powerpc64le-gnu': 4.18.0 - '@rollup/rollup-linux-riscv64-gnu': 4.18.0 - '@rollup/rollup-linux-s390x-gnu': 4.18.0 - '@rollup/rollup-linux-x64-gnu': 4.18.0 - '@rollup/rollup-linux-x64-musl': 4.18.0 - '@rollup/rollup-win32-arm64-msvc': 4.18.0 - '@rollup/rollup-win32-ia32-msvc': 4.18.0 - '@rollup/rollup-win32-x64-msvc': 4.18.0 + '@rollup/rollup-android-arm-eabi': 4.21.1 + '@rollup/rollup-android-arm64': 4.21.1 + '@rollup/rollup-darwin-arm64': 4.21.1 + 
'@rollup/rollup-darwin-x64': 4.21.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.21.1 + '@rollup/rollup-linux-arm-musleabihf': 4.21.1 + '@rollup/rollup-linux-arm64-gnu': 4.21.1 + '@rollup/rollup-linux-arm64-musl': 4.21.1 + '@rollup/rollup-linux-powerpc64le-gnu': 4.21.1 + '@rollup/rollup-linux-riscv64-gnu': 4.21.1 + '@rollup/rollup-linux-s390x-gnu': 4.21.1 + '@rollup/rollup-linux-x64-gnu': 4.21.1 + '@rollup/rollup-linux-x64-musl': 4.21.1 + '@rollup/rollup-win32-arm64-msvc': 4.21.1 + '@rollup/rollup-win32-ia32-msvc': 4.21.1 + '@rollup/rollup-win32-x64-msvc': 4.21.1 fsevents: 2.3.3 dev: true @@ -6199,7 +5917,7 @@ packages: /rxjs@7.8.1: resolution: {integrity: sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==} dependencies: - tslib: 2.6.3 + tslib: 2.7.0 dev: true /safe-array-concat@1.1.2: @@ -6272,14 +5990,14 @@ packages: hasBin: true dev: true - /semver@7.6.2: - resolution: {integrity: sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==} + /semver@7.6.3: + resolution: {integrity: sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==} engines: {node: '>=10'} hasBin: true dev: true - /send@0.18.0: - resolution: {integrity: sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==} + /send@0.19.0: + resolution: {integrity: sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==} engines: {node: '>= 0.8.0'} dependencies: debug: 2.6.9 @@ -6299,14 +6017,14 @@ packages: - supports-color dev: true - /serve-static@1.15.0: - resolution: {integrity: sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==} + /serve-static@1.16.2: + resolution: {integrity: sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==} engines: {node: '>= 0.8.0'} dependencies: - encodeurl: 1.0.2 + encodeurl: 2.0.0 escape-html: 1.0.3 parseurl: 1.3.3 - send: 0.18.0 + send: 0.19.0 transitivePeerDependencies: - supports-color dev: true @@ -6319,8 +6037,8 @@ packages: resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==} dev: true - /set-cookie-parser@2.6.0: - resolution: {integrity: sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==} + /set-cookie-parser@2.7.0: + resolution: {integrity: sha512-lXLOiqpkUumhRdFF3k1osNXCy9akgx/dyPZ5p8qAg9seJzXr5ZrlqZuWIMuY6ejOsVLE6flJ5/h3lsn57fQ/PQ==} dev: true /set-function-length@1.2.2: @@ -6418,13 +6136,6 @@ packages: is-fullwidth-code-point: 3.0.0 dev: true - /snake-case@3.0.4: - resolution: {integrity: sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==} - dependencies: - dot-case: 3.0.4 - tslib: 2.6.3 - dev: true - /source-map-js@1.2.0: resolution: {integrity: sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==} engines: {node: '>=0.10.0'} @@ -6445,7 +6156,7 @@ packages: resolution: {integrity: sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==} dependencies: spdx-expression-parse: 3.0.1 - spdx-license-ids: 3.0.18 + spdx-license-ids: 3.0.20 dev: true /spdx-exceptions@2.5.0: @@ -6456,11 +6167,11 @@ packages: resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} dependencies: spdx-exceptions: 2.5.0 - 
spdx-license-ids: 3.0.18 + spdx-license-ids: 3.0.20 dev: true - /spdx-license-ids@3.0.18: - resolution: {integrity: sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ==} + /spdx-license-ids@3.0.20: + resolution: {integrity: sha512-jg25NiDV/1fLtSgEgyvVyDunvaNHbuwF9lfNV17gSmPFAlYzdfNBlLtLzXTevwkPj7DhGbmN9VnmJIgLnhvaBw==} dev: true /sprintf-js@1.0.3: @@ -6674,7 +6385,7 @@ packages: dependencies: '@jridgewell/gen-mapping': 0.3.5 commander: 4.1.1 - glob: 10.4.2 + glob: 10.4.5 lines-and-columns: 1.2.4 mz: 2.7.0 pirates: 4.0.6 @@ -6698,10 +6409,6 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} - /svg-parser@2.0.4: - resolution: {integrity: sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==} - dev: true - /symbol-tree@3.2.4: resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} dev: true @@ -6710,7 +6417,7 @@ packages: resolution: {integrity: sha512-w2sfv80nrAh2VCbqR5AK27wswXhqcck2AhfnNW76beQXskGZ1V12GwS//yYVa3d3fcvAip2OUnbDAjW2k3v9fA==} engines: {node: '>=10.0.0'} dependencies: - ajv: 8.16.0 + ajv: 8.17.1 lodash.truncate: 4.4.2 slice-ansi: 4.0.0 string-width: 4.2.3 @@ -6753,8 +6460,8 @@ packages: resolution: {integrity: sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==} dev: false - /tinybench@2.8.0: - resolution: {integrity: sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==} + /tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} dev: true /tinypool@0.8.4: @@ -6777,6 +6484,7 @@ packages: /to-fast-properties@2.0.0: resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} engines: {node: '>=4'} + dev: false /to-readable-stream@1.0.0: resolution: {integrity: sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==} @@ -6848,8 +6556,8 @@ packages: resolution: {integrity: sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==} dev: false - /tslib@2.6.3: - resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} + /tslib@2.7.0: + resolution: {integrity: sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==} dev: true /tsutils@3.21.0(typescript@4.9.5): @@ -6879,8 +6587,8 @@ packages: prelude-ls: 1.2.1 dev: true - /type-detect@4.0.8: - resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + /type-detect@4.1.0: + resolution: {integrity: sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==} engines: {node: '>=4'} dev: true @@ -6961,8 +6669,8 @@ packages: engines: {node: '>=4.2.0'} hasBin: true - /ufo@1.5.3: - resolution: {integrity: sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==} + /ufo@1.5.4: + resolution: {integrity: sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==} dev: true /unbox-primitive@1.0.2: @@ -6974,8 +6682,8 @@ packages: which-boxed-primitive: 1.0.2 dev: true - /undici-types@5.26.5: - resolution: {integrity: 
sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + /undici-types@6.19.8: + resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} dev: true /unique-string@1.0.0: @@ -6995,17 +6703,6 @@ packages: engines: {node: '>= 0.8'} dev: true - /update-browserslist-db@1.0.16(browserslist@4.23.1): - resolution: {integrity: sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==} - hasBin: true - peerDependencies: - browserslist: '>= 4.21.0' - dependencies: - browserslist: 4.23.1 - escalade: 3.1.2 - picocolors: 1.0.1 - dev: true - /update-notifier@3.0.1: resolution: {integrity: sha512-grrmrB6Zb8DUiyDIaeRTBCkgISYUgETNe7NglEbVsrLWXeESnlCSP50WfRSj/GmzMPl6Uchj24S/p80nP/ZQrQ==} engines: {node: '>=8'} @@ -7104,52 +6801,38 @@ packages: hasBin: true dependencies: cac: 6.7.14 - debug: 4.3.5 + debug: 4.3.6 pathe: 1.1.2 picocolors: 1.0.1 - vite: 5.3.1(less@3.13.1) + vite: 5.4.2(less@3.13.1) transitivePeerDependencies: - '@types/node' - less - lightningcss - sass + - sass-embedded - stylus - sugarss - supports-color - terser dev: true - /vite-plugin-svgr@4.2.0(typescript@4.9.5)(vite@4.5.3): - resolution: {integrity: sha512-SC7+FfVtNQk7So0XMjrrtLAbEC8qjFPifyD7+fs/E6aaNdVde6umlVVh0QuwDLdOMu7vp5RiGFsB70nj5yo0XA==} - peerDependencies: - vite: ^2.6.0 || 3 || 4 || 5 - dependencies: - '@rollup/pluginutils': 5.1.0 - '@svgr/core': 8.1.0(typescript@4.9.5) - '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0) - vite: 4.5.3(less@3.13.1) - transitivePeerDependencies: - - rollup - - supports-color - - typescript - dev: true - - /vite-tsconfig-paths@3.6.0(vite@4.5.3): + /vite-tsconfig-paths@3.6.0(vite@4.5.5): resolution: {integrity: sha512-UfsPYonxLqPD633X8cWcPFVuYzx/CMNHAjZTasYwX69sXpa4gNmQkR0XCjj82h7zhLGdTWagMjC1qfb9S+zv0A==} peerDependencies: vite: '>2.0.0-0' dependencies: - debug: 4.3.5 + debug: 4.3.6 globrex: 0.1.2 recrawl-sync: 2.2.3 tsconfig-paths: 4.2.0 - vite: 4.5.3(less@3.13.1) + vite: 4.5.5(less@3.13.1) transitivePeerDependencies: - supports-color dev: true - /vite@4.5.3(less@3.13.1): - resolution: {integrity: sha512-kQL23kMeX92v3ph7IauVkXkikdDRsYMGTVl5KY2E9OY4ONLvkHf04MDTbnfo6NKxZiDLWzVpP5oTa8hQD8U3dg==} + /vite@4.5.5(less@3.13.1): + resolution: {integrity: sha512-ifW3Lb2sMdX+WU91s3R0FyQlAyLxOzCSCP37ujw0+r5POeHPwe6udWVIElKQq8gk3t7b8rkmvqC6IHBpCff4GQ==} engines: {node: ^14.18.0 || >=16.0.0} hasBin: true peerDependencies: @@ -7178,14 +6861,14 @@ packages: dependencies: esbuild: 0.18.20 less: 3.13.1 - postcss: 8.4.38 - rollup: 3.29.4 + postcss: 8.4.41 + rollup: 3.29.5 optionalDependencies: fsevents: 2.3.3 dev: true - /vite@5.3.1(less@3.13.1): - resolution: {integrity: sha512-XBmSKRLXLxiaPYamLv3/hnP/KXDai1NDexN0FpkTaZXTfycHvkRHoenpgl/fvuK/kPbB6xAgoyiryAhQNxYmAQ==} + /vite@5.4.2(less@3.13.1): + resolution: {integrity: sha512-dDrQTRHp5C1fTFzcSaMxjk6vdpKvT+2/mIdE07Gw2ykehT49O0z/VHS3zZ8iV/Gh8BJJKHWOe5RjaNrW5xf/GA==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: @@ -7193,6 +6876,7 @@ packages: less: '*' lightningcss: ^1.21.0 sass: '*' + sass-embedded: '*' stylus: '*' sugarss: '*' terser: ^5.4.0 @@ -7205,6 +6889,8 @@ packages: optional: true sass: optional: true + sass-embedded: + optional: true stylus: optional: true sugarss: @@ -7214,13 +6900,13 @@ packages: dependencies: esbuild: 0.21.5 less: 3.13.1 - postcss: 8.4.38 - rollup: 4.18.0 + postcss: 8.4.41 + rollup: 4.21.1 optionalDependencies: fsevents: 2.3.3 dev: true - /vitest@1.6.0(jsdom@24.1.1)(less@3.13.1): + 
/vitest@1.6.0(jsdom@24.1.3)(less@3.13.1): resolution: {integrity: sha512-H5r/dN06swuFnzNFhq/dnz37bPXnq8xB2xB5JOVk8K09rUtoeNN+LHWkoQ0A/i3hvbUKKcCei9KpbxqHMLhLLA==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true @@ -7251,25 +6937,26 @@ packages: '@vitest/spy': 1.6.0 '@vitest/utils': 1.6.0 acorn-walk: 8.3.3 - chai: 4.4.1 - debug: 4.3.5 + chai: 4.5.0 + debug: 4.3.6 execa: 8.0.1 - jsdom: 24.1.1 + jsdom: 24.1.3 local-pkg: 0.5.0 - magic-string: 0.30.10 + magic-string: 0.30.11 pathe: 1.1.2 picocolors: 1.0.1 std-env: 3.7.0 strip-literal: 2.1.0 - tinybench: 2.8.0 + tinybench: 2.9.0 tinypool: 0.8.4 - vite: 5.3.1(less@3.13.1) + vite: 5.4.2(less@3.13.1) vite-node: 1.6.0(less@3.13.1) - why-is-node-running: 2.2.2 + why-is-node-running: 2.3.0 transitivePeerDependencies: - less - lightningcss - sass + - sass-embedded - stylus - sugarss - supports-color @@ -7389,8 +7076,8 @@ packages: isexe: 2.0.0 dev: true - /why-is-node-running@2.2.2: - resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==} + /why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} engines: {node: '>=8'} hasBin: true dependencies: @@ -7498,10 +7185,6 @@ packages: resolution: {integrity: sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==} dev: true - /yallist@3.1.1: - resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} - dev: true - /yaml@1.10.2: resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} engines: {node: '>= 6'} @@ -7548,13 +7231,13 @@ packages: yargs-parser: 21.1.1 dev: true - /yocto-queue@1.0.0: - resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==} + /yocto-queue@1.1.1: + resolution: {integrity: sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==} engines: {node: '>=12.20'} dev: true - /zrender@5.5.0: - resolution: {integrity: sha512-O3MilSi/9mwoovx77m6ROZM7sXShR/O/JIanvzTwjN3FORfLSr81PsUGd7jlaYOeds9d8tw82oP44+3YucVo+w==} + /zrender@5.6.0: + resolution: {integrity: sha512-uzgraf4njmmHAbEUxMJ8Oxg+P3fT04O+9p7gY+wJRVxo8Ge+KmYv0WJev945EH4wFuc4OY2NLXz46FZrWS9xJg==} dependencies: tslib: 2.3.0 dev: false diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less index 1895cabc184..44f53fa9d47 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less @@ -164,4 +164,9 @@ body { .pointer { cursor: pointer; +} + +.data-container { + padding: 24px; + height: 80vh; } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx index 0ad6aa3f174..0b7607f2978 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx @@ -20,7 +20,9 @@ import React, { Suspense } from 'react'; import { Switch as AntDSwitch, Layout } from 'antd'; import NavBar from './components/navBar/navBar'; +import NavBarV2 from 
'@/v2/components/navBar/navBar'; import Breadcrumbs from './components/breadcrumbs/breadcrumbs'; +import BreadcrumbsV2 from '@/v2/components/breadcrumbs/breadcrumbs'; import { HashRouter as Router, Switch, Route, Redirect } from 'react-router-dom'; import { routes } from '@/routes'; import { routesV2 } from '@/v2/routes-v2'; @@ -30,6 +32,7 @@ import classNames from 'classnames'; import Loader from '@/v2/components/loader/loader'; import './app.less'; +import NotFound from '@/v2/pages/notFound/notFound'; const { Header, Content, Footer @@ -37,7 +40,7 @@ const { interface IAppState { collapsed: boolean; - enableNewUI: boolean; + enableOldUI: boolean; } class App extends React.Component, IAppState> { @@ -45,7 +48,7 @@ class App extends React.Component, IAppState> { super(props); this.state = { collapsed: false, - enableNewUI: false + enableOldUI: false }; } @@ -54,44 +57,54 @@ class App extends React.Component, IAppState> { }; render() { - const { collapsed, enableNewUI } = this.state; + const { collapsed, enableOldUI } = this.state; const layoutClass = classNames('content-layout', { 'sidebar-collapsed': collapsed }); return ( - + { + (enableOldUI) + ? + : + }
    -
    - - New UI
    } - onChange={(checked: boolean) => { - this.setState({ - enableNewUI: checked - }); - }} /> +
    + {(enableOldUI) ? : } + + Switch to + Old UI
    } + checkedChildren={
    New UI
    } + onChange={(checked: boolean) => { + this.setState({ + enableOldUI: checked + }); + }} /> +
    - - - - - - {(enableNewUI) - ? }> - {routesV2.map( + + }> + + + + + {(enableOldUI) + ? routes.map( (route, index) => - )} - - : routes.map( - (route, index) => - ) - } - + ) + : routesV2.map( + (route, index) => { + return + } + ) + } + + +
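
The app.tsx hunk above wires the old and new Recon UIs behind a single piece of state: `enableOldUI` selects which nav bar/breadcrumbs render and whether the legacy `routes` or the lazy-loaded `routesV2` table is mounted inside a `Suspense` boundary. The following is a minimal, hedged sketch of that toggle pattern only, not the actual Recon component; `OldOverview`, the `./v2/overview` import path, and the route arrays are hypothetical stand-ins.

// toggleSketch.tsx — illustrative only; assumes react-router-dom v5 as used elsewhere in this diff.
import React, { Suspense, useState } from 'react';
import { HashRouter as Router, Switch, Route, Redirect } from 'react-router-dom';

// Hypothetical route tables; the real app uses `routes` and `routesV2`.
const OldOverview = () => <div>Old Overview</div>;
const oldRoutes = [{ path: '/Overview', component: OldOverview }];
const newRoutes = [{ path: '/Overview', component: React.lazy(() => import('./v2/overview')) }];

function AppShell(): JSX.Element {
  const [enableOldUI, setEnableOldUI] = useState(false);
  // One flag drives both the chrome (nav/breadcrumbs) and the route table.
  const routeTable = enableOldUI ? oldRoutes : newRoutes;

  return (
    <Router>
      <button onClick={() => setEnableOldUI(prev => !prev)}>
        Switch to {enableOldUI ? 'New' : 'Old'} UI
      </button>
      {/* v2 routes are lazy components, so they must sit under Suspense with a fallback. */}
      <Suspense fallback={<span>Loading...</span>}>
        <Switch>
          <Route exact path='/'>
            <Redirect to='/Overview' />
          </Route>
          {routeTable.map((route, index) => (
            <Route key={index} exact path={route.path} component={route.component} />
          ))}
        </Switch>
      </Suspense>
    </Router>
  );
}

export default AppShell;
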
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx index 0230d4dd61d..6b2bab246b7 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx @@ -76,10 +76,11 @@ class AutoReloadPanel extends React.Component { ); const lastUpdatedDeltaFullText = lastUpdatedOMDBDelta === 0 || lastUpdatedOMDBDelta === undefined || lastUpdatedOMDBFull === 0 || lastUpdatedOMDBFull === undefined ? '' : + //omSyncLoad should be clickable at all times. If the response from the dbsync is false it will show DB update is already running else show triggered sync ( <>   | DB Synced at {lastUpdatedDeltaFullToolTip} -  )} + + ); + }); + breadCrumbs[breadCrumbs.length - 1] = generateSubMenu(currPath[currPath.length - 1]); + return breadCrumbs; + } + + return ( + + ) +} + +export default DUBreadcrumbNav; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx new file mode 100644 index 00000000000..f2c740f7dbc --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx @@ -0,0 +1,389 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React, { useRef, useState } from 'react'; +import moment from 'moment'; +import { AxiosError } from 'axios'; +import { Table } from 'antd'; + +import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; +import { byteToSize, showDataFetchError } from '@/utils/common'; + +import { Acl } from '@/v2/types/acl.types'; + + +// ------------- Types -------------- // +type CountStats = { + numBucket: number; + numDir: number; + numKey: number; + numVolume: number; +}; + +type LocationInfo = { + blockID: { + containerBlockID: { + containerID: number; + localID: number; + }; + blockCommitSequenceId: number; + containerID: number; + localID: number; + }; + length: number; + offset: number; + token: null; + createVersion: number; + pipeline: null; + partNumber: number; + containerID: number; + localID: number; + blockCommitSequenceId: number; +}; + +type ObjectInfo = { + bucketName: string; + bucketLayout: string; + encInfo: null; + fileName: string; + keyName: string; + name: string; + owner: string; + volume: string; + volumeName: string; + sourceVolume: string | null; + sourceBucket: string | null; + usedBytes: number | null; + usedNamespace: number; + storageType: string; + creationTime: number; + dataSize: number; + modificationTime: number; + quotaInBytes: number; + quotaInNamespace: number; +} + +type ReplicationConfig = { + replicationFactor: string; + requiredNodes: number; + replicationType: string; +} + +type ObjectInfoResponse = ObjectInfo & { + acls: Acl[]; + versioningEnabled: boolean; + metadata: Record; + file: boolean; + keyLocationVersions: { + version: number; + locationList: LocationInfo[]; + multipartKey: boolean; + blocksLatestVersionOnly: LocationInfo[]; + locationLists: LocationInfo[][]; + locationListCount: number; + }[]; + versioning: boolean; + encryptionInfo: null; + replicationConfig: ReplicationConfig; +}; + +type SummaryResponse = { + countStats: CountStats; + objectInfo: ObjectInfoResponse; + path: string; + status: string; + type: string; +} + +type MetadataProps = { + path: string; +}; + +type MetadataState = { + keys: string[]; + values: (string | number | boolean | null)[]; +}; + + +// ------------- Component -------------- // +const DUMetadata: React.FC = ({ + path = '/' +}) => { + const [loading, setLoading] = useState(false); + const [state, setState] = useState({ + keys: [], + values: [] + }); + const cancelSummarySignal = useRef(); + const keyMetadataSummarySignal = useRef(); + const cancelQuotaSignal = useRef(); + + const getObjectInfoMapping = React.useCallback((summaryResponse) => { + + const keys: string[] = []; + const values: (string | number | boolean | null)[] = []; + /** + * We are creating a specific set of keys under Object Info response + * which do not require us to modify anything + */ + const selectedInfoKeys = [ + 'bucketName', 'bucketLayout', 'encInfo', 'fileName', 'keyName', + 'name', 'owner', 'sourceBucket', 'sourceVolume', 'storageType', + 'usedNamespace', 'volumeName', 'volume' + ] as const; + const objectInfo: ObjectInfo = summaryResponse.objectInfo ?? 
{}; + + selectedInfoKeys.forEach((key) => { + if (objectInfo[key as keyof ObjectInfo] !== undefined && objectInfo[key as keyof ObjectInfo] !== -1) { + // We will use regex to convert the Object key from camel case to space separated title + // The following regex will match abcDef and produce Abc Def + let keyName = key.replace(/([a-z0-9])([A-Z])/g, '$1 $2'); + keyName = keyName.charAt(0).toUpperCase() + keyName.slice(1); + keys.push(keyName); + values.push(objectInfo[key as keyof ObjectInfo]); + } + }); + + if (objectInfo?.creationTime !== undefined && objectInfo?.creationTime !== -1) { + keys.push('Creation Time'); + values.push(moment(objectInfo.creationTime).format('ll LTS')); + } + + if (objectInfo?.usedBytes !== undefined && objectInfo?.usedBytes !== -1 && objectInfo!.usedBytes !== null) { + keys.push('Used Bytes'); + values.push(byteToSize(objectInfo.usedBytes, 3)); + } + + if (objectInfo?.dataSize !== undefined && objectInfo?.dataSize !== -1) { + keys.push('Data Size'); + values.push(byteToSize(objectInfo.dataSize, 3)); + } + + if (objectInfo?.modificationTime !== undefined && objectInfo?.modificationTime !== -1) { + keys.push('Modification Time'); + values.push(moment(objectInfo.modificationTime).format('ll LTS')); + } + + if (objectInfo?.quotaInBytes !== undefined && objectInfo?.quotaInBytes !== -1) { + keys.push('Quota In Bytes'); + values.push(byteToSize(objectInfo.quotaInBytes, 3)); + } + + if (objectInfo?.quotaInNamespace !== undefined && objectInfo?.quotaInNamespace !== -1) { + keys.push('Quota In Namespace'); + values.push(byteToSize(objectInfo.quotaInNamespace, 3)); + } + + if (summaryResponse.objectInfo?.replicationConfig?.replicationFactor !== undefined) { + keys.push('Replication Factor'); + values.push(summaryResponse.objectInfo.replicationConfig.replicationFactor); + } + + if (summaryResponse.objectInfo?.replicationConfig?.replicationType !== undefined) { + keys.push('Replication Type'); + values.push(summaryResponse.objectInfo.replicationConfig.replicationType); + } + + if (summaryResponse.objectInfo?.replicationConfig?.requiredNodes !== undefined + && summaryResponse.objectInfo?.replicationConfig?.requiredNodes !== -1) { + keys.push('Replication Required Nodes'); + values.push(summaryResponse.objectInfo.replicationConfig.requiredNodes); + } + + return { keys, values } + }, [path]); + + function loadMetadataSummary(path: string) { + cancelRequests([ + cancelSummarySignal.current!, + keyMetadataSummarySignal.current! + ]); + const keys: string[] = []; + const values: (string | number | boolean | null)[] = []; + + const { request, controller } = AxiosGetHelper( + `/api/v1/namespace/summary?path=${path}`, + cancelSummarySignal.current + ); + cancelSummarySignal.current = controller; + + request.then(response => { + const summaryResponse: SummaryResponse = response.data; + keys.push('Entity Type'); + values.push(summaryResponse.type); + + if (summaryResponse.status === 'INITIALIZING') { + showDataFetchError(`The metadata is currently initializing. 
Please wait a moment and try again later`); + return; + } + + if (summaryResponse.status === 'PATH_NOT_FOUND') { + showDataFetchError(`Invalid Path: ${path}`); + return; + } + + // If the entity is a Key then fetch the Key metadata only + if (summaryResponse.type === 'KEY') { + const { request: metadataRequest, controller: metadataNewController } = AxiosGetHelper( + `/api/v1/namespace/du?path=${path}&replica=true`, + keyMetadataSummarySignal.current + ); + keyMetadataSummarySignal.current = metadataNewController; + metadataRequest.then(response => { + keys.push('File Size'); + values.push(byteToSize(response.data.size, 3)); + keys.push('File Size With Replication'); + values.push(byteToSize(response.data.sizeWithReplica, 3)); + keys.push("Creation Time"); + values.push(moment(summaryResponse.objectInfo.creationTime).format('ll LTS')); + keys.push("Modification Time"); + values.push(moment(summaryResponse.objectInfo.modificationTime).format('ll LTS')); + + setState({ + keys: keys, + values: values + }); + }).catch(error => { + showDataFetchError(error.toString()); + }); + return; + } + + /** + * Will iterate over the keys of the countStats to avoid multiple if blocks + * and check from the map for the respective key name / title to insert + */ + const countStats: CountStats = summaryResponse.countStats ?? {}; + const keyToNameMap: Record = { + numVolume: 'Volumes', + numBucket: 'Buckets', + numDir: 'Total Directories', + numKey: 'Total Keys' + } + Object.keys(countStats).forEach((key: string) => { + if (countStats[key as keyof CountStats] !== undefined + && countStats[key as keyof CountStats] !== -1) { + keys.push(keyToNameMap[key]); + values.push(countStats[key as keyof CountStats]); + } + }) + + const { + keys: objectInfoKeys, + values: objectInfoValues + } = getObjectInfoMapping(summaryResponse); + + keys.push(...objectInfoKeys); + values.push(...objectInfoValues); + + setState({ + keys: keys, + values: values + }); + }).catch(error => { + showDataFetchError((error as AxiosError).toString()); + }); + } + + function loadQuotaSummary(path: string) { + cancelRequests([ + cancelQuotaSignal.current! 
+ ]); + + const { request, controller } = AxiosGetHelper( + `/api/v1/namespace/quota?path=${path}`, + cancelQuotaSignal.current + ); + cancelQuotaSignal.current = controller; + + request.then(response => { + const quotaResponse = response.data; + + if (quotaResponse.status === 'INITIALIZING') { + return; + } + if (quotaResponse.status === 'TYPE_NOT_APPLICABLE') { + return; + } + if (quotaResponse.status === 'PATH_NOT_FOUND') { + showDataFetchError(`Invalid Path: ${path}`); + return; + } + + const keys: string[] = []; + const values: (string | number | boolean | null)[] = []; + // Append quota information + // In case the object's quota isn't set + if (quotaResponse.allowed !== undefined && quotaResponse.allowed !== -1) { + keys.push('Quota Allowed'); + values.push(byteToSize(quotaResponse.allowed, 3)); + } + + if (quotaResponse.used !== undefined && quotaResponse.used !== -1) { + keys.push('Quota Used'); + values.push(byteToSize(quotaResponse.used, 3)); + } + setState((prevState) => ({ + keys: [...prevState.keys, ...keys], + values: [...prevState.values, ...values] + })); + }).catch(error => { + showDataFetchError(error.toString()); + }); + } + + React.useEffect(() => { + setLoading(true); + loadMetadataSummary(path); + loadQuotaSummary(path); + setLoading(false); + + return (() => { + cancelRequests([ + cancelSummarySignal.current!, + keyMetadataSummarySignal.current!, + cancelQuotaSignal.current! + ]); + }) + }, [path]); + + const content = []; + for (const [i, v] of state.keys.entries()) { + content.push({ + key: v, + value: state.values[i] + }); + } + + return ( +
    {{typestat.key}}{{typestat.value[0]}} {{typestat.value[1]}}
    + + +
    + ); +} + +export default DUMetadata; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx index 79fa0760338..9d483efd6b0 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx @@ -28,6 +28,10 @@ export interface EChartProps { loading?: boolean; theme?: 'light'; onClick?: () => any | void; + eventHandler?: { + name: string, + handler: (arg0: any) => void + }; } const EChart = ({ @@ -36,7 +40,8 @@ const EChart = ({ settings, loading, theme, - onClick + onClick, + eventHandler }: EChartProps): JSX.Element => { const chartRef = useRef(null); useEffect(() => { @@ -47,6 +52,10 @@ const EChart = ({ if (onClick) { chart.on('click', onClick); } + + if (eventHandler) { + chart.on(eventHandler.name, eventHandler.handler); + } } // Add chart resize listener @@ -71,6 +80,10 @@ const EChart = ({ if (onClick) { chart!.on('click', onClick); } + + if (eventHandler) { + chart!.on(eventHandler.name, eventHandler.handler); + } } }, [option, settings, theme]); // Whenever theme changes we need to add option and setting due to it being deleted in cleanup function diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less new file mode 100644 index 00000000000..09ec283d555 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less @@ -0,0 +1,65 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +.logo-v2 { + color: #FFFFFF; + font-size: 18px; + font-weight: 500; + padding: 20px; + background-color: #142329; + .logo-text-v2 { + margin-left: 10px; + } +} + +.ant-layout-sider-collapsed { + .logo-v2 { + padding: 10px; + + .logo-text-v2 { + display: none; + } + } + .ant-layout-sider-trigger { + background: #142329 !important; + text-align: center !important; + padding-left: 20px !important; + } +} + +.ant-layout-sider { + background: #142329 !important; + + .ant-menu-dark { + background: #142329 !important; + + .ant-menu-item-selected { + span { + color: #4DCF4C !important; + } + background: #224452 !important; + color: #4DCF4C !important; + } + } + + .ant-layout-sider-trigger { + background: #142329 !important; + text-align: unset !important; + padding-left: 25px; + } +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx new file mode 100644 index 00000000000..1dd1ede48db --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React, { useState, useEffect, useRef } from 'react'; +import axios, { AxiosResponse } from 'axios'; +import { Layout, Menu, Spin } from 'antd'; +import { + BarChartOutlined, + ClusterOutlined, + ContainerOutlined, + DashboardOutlined, + DatabaseOutlined, + DeploymentUnitOutlined, + FolderOpenOutlined, + InboxOutlined, + LayoutOutlined, + PieChartOutlined +} from '@ant-design/icons'; +import { useLocation, Link } from 'react-router-dom'; + + +import logo from '@/logo.png'; +import { showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; + +import './navBar.less'; + + +// ------------- Types -------------- // +type NavBarProps = { + collapsed: boolean; + onCollapse: (arg0: boolean) => void; +} + +const NavBar: React.FC = ({ + collapsed = false, + onCollapse = () => { } +}) => { + const [isHeatmapEnabled, setIsHeatmapEnabled] = useState(false); + const cancelDisabledFeatureSignal = useRef(); + const location = useLocation(); + + const fetchDisabledFeatures = async () => { + const disabledfeaturesEndpoint = `/api/v1/features/disabledFeatures`; + const { request, controller } = AxiosGetHelper( + disabledfeaturesEndpoint, + cancelDisabledFeatureSignal.current + ) + cancelDisabledFeatureSignal.current = controller; + try { + const response: AxiosResponse = await request; + const heatmapDisabled = response?.data?.includes('HEATMAP') + setIsHeatmapEnabled(!heatmapDisabled); + } catch (error: unknown) { + showDataFetchError((error as Error).toString()) + } + } + + + useEffect(() => { + fetchDisabledFeatures(); + // Component will unmount + return (() => { + cancelRequests([cancelDisabledFeatureSignal.current!]) + }) + }, []) + + const menuItems = [( + }> + Overview + + + ), ( + }> + Volumes + + + ), ( + }> + Buckets + + + ), ( + }> + Datanodes + + + ), ( + }> + Pipelines + + + ), ( + }> + Containers + + + ), ( + }> + }> + Insights + + + }> + OM DB Insights + + + + ), ( + }> + Disk Usage + + + ), ( + isHeatmapEnabled && + }> + Heatmap + + + )] + return ( + +
    + Ozone Recon Logo + Ozone Recon +
    + + {...menuItems} + +
    + ); +} + +export default NavBar; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewSummaryCard.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewSummaryCard.tsx index e383512f20e..8736b3e0d29 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewSummaryCard.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewSummaryCard.tsx @@ -39,6 +39,7 @@ type OverviewTableCardProps = { data?: string | React.ReactElement; linkToUrl?: string; showHeader?: boolean; + state?: Record; } // ------------- Styles -------------- // @@ -63,15 +64,18 @@ const OverviewSummaryCard: React.FC = ({ columns = [], tableData = [], linkToUrl = '', - showHeader = false + showHeader = false, + state }) => { - const titleElement = (linkToUrl) ? (
    {title} View Insights diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx new file mode 100644 index 00000000000..2601905a142 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +import EChart from '@/v2/components/eChart/eChart'; +import { byteToSize } from '@/utils/common'; +import { DUSubpath } from '@/v2/types/diskUsage.types'; + +//-------Types--------// +type PieChartProps = { + path: string; + limit: number; + size: number; + subPaths: DUSubpath[]; + subPathCount: number; + sizeWithReplica: number; + loading: boolean; +} + +//-------Constants---------// +const OTHER_PATH_NAME = 'Other Objects'; +const MIN_BLOCK_SIZE = 0.05; + + +//----------Component---------// +const DUPieChart: React.FC = ({ + path, + limit, + size, + subPaths, + subPathCount, + sizeWithReplica, + loading +}) => { + + const [subpathSize, setSubpathSize] = React.useState(0); + + function getSubpathSize(subpaths: DUSubpath[]): number { + const subpathSize = subpaths + .map((subpath) => subpath.size) + .reduce((acc, curr) => acc + curr, 0); + // If there is no subpaths, then the size will be total size of path + return (subPaths.length === 0) ? size : subpathSize; + } + + function updatePieData() { + /** + * We need to calculate the size of "Other objects" in two cases: + * + * 1) If we have more subpaths listed, than the limit. + * 2) If the limit is set to the maximum limit (30) and we have any number of subpaths. + * In this case we won't necessarily have "Other objects", but we check if the + * other objects's size is more than zero (we will have other objects if there are more than 30 subpaths, + * but we can't check on that, as the response will always have + * 30 subpaths, but from the total size and the subpaths size we can calculate it). + */ + let subpaths: DUSubpath[] = subPaths; + + let pathLabels: string[] = []; + let percentage: string[] = []; + let sizeStr: string[]; + let valuesWithMinBlockSize: number[] = []; + + if (subPathCount > limit) { + // If the subpath count is greater than the provided limit + // Slice the subpath to the limit + subpaths = subpaths.slice(0, limit); + // Add the size of the subpath + const limitedSize = getSubpathSize(subpaths); + const remainingSize = size - limitedSize; + subpaths.push({ + path: OTHER_PATH_NAME, + size: remainingSize, + sizeWithReplica: (sizeWithReplica === -1) + ? 
-1 + : sizeWithReplica - remainingSize, + isKey: false + }) + } + + if (subPathCount === 0 || subpaths.length === 0) { + // No more subpaths available + pathLabels = [path.split('/').pop() ?? '']; + valuesWithMinBlockSize = [0.1]; + percentage = ['100.00']; + sizeStr = [byteToSize(size, 1)]; + } else { + pathLabels = subpaths.map(subpath => { + const subpathName = subpath.path.split('/').pop() ?? ''; + // Diferentiate keys by removing trailing slash + return (subpath.isKey || subpathName === OTHER_PATH_NAME) + ? subpathName + : subpathName + '/'; + }); + + let values: number[] = [0]; + if (size > 0) { + values = subpaths.map( + subpath => (subpath.size / size) + ); + } + const valueClone = structuredClone(values); + valuesWithMinBlockSize = valueClone?.map( + (val: number) => (val > 0) + ? val + MIN_BLOCK_SIZE + : val + ); + + percentage = values.map(value => (value * 100).toFixed(2)); + sizeStr = subpaths.map((subpath) => byteToSize(subpath.size, 1)); + } + + return valuesWithMinBlockSize.map((key, idx) => { + return { + value: key, + name: pathLabels[idx], + size: sizeStr[idx], + percentage: percentage[idx] + } + }); + } + + React.useEffect(() => { + setSubpathSize(getSubpathSize(subPaths)); + }, [subPaths, limit]); + + const pieData = React.useMemo(() => updatePieData(), [path, subPaths, limit]); + + const eChartsOptions = { + title: { + text: `${byteToSize(subpathSize, 1)} / ${byteToSize(size, 1)}`, + left: 'center', + top: '95%' + }, + tooltip: { + trigger: 'item', + formatter: ({ dataIndex, name, color }) => { + const nameEl = `${name}
    `; + const dataEl = `Total Data Size: ${pieData[dataIndex]['size']}
    ` + const percentageEl = `Percentage: ${pieData[dataIndex]['percentage']} %` + return `${nameEl}${dataEl}${percentageEl}` + } + }, + legend: { + top: '10%', + orient: 'vertical', + left: '0%', + width: '80%' + }, + grid: { + + }, + series: [ + { + type: 'pie', + radius: '70%', + data: pieData.map((value) => { + return { + value: value.value, + name: value.name + } + }), + emphasis: { + itemStyle: { + shadowBlur: 10, + shadowOffsetX: 0, + shadowColor: 'rgba(0, 0, 0, 0.5)' + } + } + } + ] + }; + + const handleLegendChange = ({selected}: {selected: Record}) => { + const filteredPath = subPaths.filter((value) => { + // In case of any leading '/' remove them and add a / at end + // to make it similar to legend + const splitPath = value.path?.split('/'); + const pathName = splitPath[splitPath.length - 1] ?? '' + ((value.isKey) ? '' : '/'); + return selected[pathName]; + }) + const newSize = getSubpathSize(filteredPath); + setSubpathSize(newSize); + } + + return ( + + ); +} + +export default DUPieChart; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/heatmapPlot.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/heatmapPlot.tsx new file mode 100644 index 00000000000..a58a7704dac --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/heatmapPlot.tsx @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AgChartsReact } from 'ag-charts-react'; +import { byteToSize } from '@/utils/common'; +import { HeatmapResponse } from '@/v2/types/heatmap.types'; + +type HeatmapPlotProps = { + data: HeatmapResponse; + onClick: (arg0: string) => void; + colorScheme: string[]; + entityType: string; +}; + +const capitalize = (str: T) => { + return str.charAt(0).toUpperCase() + str.slice(1) as Capitalize; +} + +const HeatmapPlot: React.FC = ({ + data, + onClick, + colorScheme, + entityType = '' +}) => { + + const tooltipContent = (params: any) => { + let tooltipContent = ` + Size: + ${byteToSize(params.datum.size, 1)} + `; + if (params.datum.accessCount !== undefined) { + tooltipContent += `
    + Access count: + ${params.datum.accessCount } + `; + } + else{ + tooltipContent += `
    + Max Access Count: + ${params.datum.maxAccessCount} + `;} + if (params.datum.label !== '') { + tooltipContent += `
    + Entity Name: + ${params.datum.label ? params.datum.label.split('/').slice(-1) : ''} + `; + } + tooltipContent += '
    '; + return tooltipContent; + }; + + const heatmapConfig = { + type: 'treemap', + labelKey: 'label',// the name of the key to fetch the label value from + sizeKey: 'normalizedSize',// the name of the key to fetch the value that will determine tile size + colorKey: 'color', + title: { color: '#424242', fontSize: 14, fontFamily: 'Roboto', fontWeight: '600' }, + subtitle: { color: '#424242', fontSize: 12, fontFamily: 'Roboto', fontWeight: '400' }, + tooltip: { + renderer: (params) => { + return { + content: tooltipContent(params) + }; + } + }, + formatter: ({ highlighted }: { highlighted: boolean }) => { + const stroke = highlighted ? '#CED4D9' : '#FFFFFF'; + return { stroke }; + }, + labels: { + color: '#FFFFFF', + fontWeight: 'bold', + fontSize: 12 + }, + tileStroke: '#FFFFFF', + tileStrokeWidth: 1.4, + colorDomain: [ + 0.000, + 0.050, + 0.100, + 0.150, + 0.200, + 0.250, + 0.300, + 0.350, + 0.400, + 0.450, + 0.500, + 0.550, + 0.600, + 0.650, + 0.700, + 0.750, + 0.800, + 0.850, + 0.900, + 0.950, + 1.000 + ], + colorRange: [...colorScheme], + groupFill: '#E6E6E6', + groupStroke: '#E1E2E6', + nodePadding: 3, + labelShadow: { enabled: false }, //labels shadow + gradient: false, + highlightStyle: { + text: { + color: '#424242', + }, + item: { + fill: 'rgba(0, 0 ,0, 0.0)', + }, + }, + listeners: { + nodeClick: (event) => { + var data = event.datum; + // Leaf level box should not call API + if (!data.color) + if (data.path) { + onClick(data.path); + } + }, + }, + } + + const options = { + data, + series: [heatmapConfig], + title: { text: `${capitalize(entityType)} Heatmap`} + }; + + return +} + +export default HeatmapPlot; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsContainerPlot.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsContainerPlot.tsx new file mode 100644 index 00000000000..851c355e765 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsContainerPlot.tsx @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React from 'react'; +import filesize from 'filesize'; +import { EChartsOption } from 'echarts'; + +import EChart from '@/v2/components/eChart/eChart'; +import { ContainerCountResponse, ContainerPlotData } from '@/v2/types/insights.types'; + +type ContainerSizeDistributionProps = { + containerCountResponse: ContainerCountResponse[]; + containerSizeError: string | undefined; +} + +const size = filesize.partial({ standard: 'iec', round: 0 }); + +const ContainerSizeDistribution: React.FC = ({ + containerCountResponse, + containerSizeError +}) => { + + const [containerPlotData, setContainerPlotData] = React.useState({ + containerCountValues: [], + containerCountMap: new Map() + }); + + function updatePlotData() { + const containerCountMap: Map = containerCountResponse.reduce( + (map: Map, current) => { + const containerSize = current.containerSize; + const oldCount = map.get(containerSize) ?? 0; + map.set(containerSize, oldCount + current.count); + return map; + }, + new Map() + ); + + const containerCountValues = Array.from(containerCountMap.keys()).map(value => { + const upperbound = size(value); + const upperboundPwr = Math.log2(value); + + const lowerbound = upperboundPwr > 10 ? size(2 ** (upperboundPwr - 1)) : size(0); + return `${lowerbound} - ${upperbound}`; + }); + + setContainerPlotData({ + containerCountValues: containerCountValues, + containerCountMap: containerCountMap + }); + } + + React.useEffect(() => { + updatePlotData(); + }, []); + + const { containerCountMap, containerCountValues } = containerPlotData; + + const containerPlotOptions: EChartsOption = { + tooltip: { + trigger: 'item', + formatter: ({ data }) => { + return `Size Range: ${data.name}
    Count: ${data.value}` + } + }, + legend: { + orient: 'vertical', + left: 'right' + }, + series: { + type: 'pie', + radius: '50%', + data: Array.from(containerCountMap?.values() ?? []).map((value, idx) => { + return { + value: value, + name: containerCountValues[idx] ?? '' + } + }), + }, + graphic: (containerSizeError) ? { + type: 'group', + left: 'center', + top: 'middle', + z: 100, + children: [ + { + type: 'rect', + left: 'center', + top: 'middle', + z: 100, + shape: { + width: 500, + height: 500 + }, + style: { + fill: 'rgba(256, 256, 256, 0.5)' + } + }, + { + type: 'rect', + left: 'center', + top: 'middle', + z: 100, + shape: { + width: 500, + height: 40 + }, + style: { + fill: '#FC909B' + } + }, + { + type: 'text', + left: 'center', + top: 'middle', + z: 100, + style: { + text: `No data available. ${containerSizeError}`, + font: '20px sans-serif' + } + } + ] + } : undefined + } + + return (<> + + ) +} + +export default ContainerSizeDistribution; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsFilePlot.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsFilePlot.tsx new file mode 100644 index 00000000000..bb6453ed7c1 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsFilePlot.tsx @@ -0,0 +1,251 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React from 'react'; +import filesize from 'filesize'; +import { EChartsOption } from 'echarts'; +import { ValueType } from 'react-select'; + +import EChart from '@/v2/components/eChart/eChart'; +import MultiSelect, { Option } from '@/v2/components/select/multiSelect'; +import { FileCountResponse, FilePlotData } from '@/v2/types/insights.types'; + + +//-----Types------ +type FileSizeDistributionProps = { + volumeOptions: Option[]; + volumeBucketMap: Map>; + fileCountResponse: FileCountResponse[]; + fileCountError: string | undefined; +} + +const size = filesize.partial({ standard: 'iec', round: 0 }); + +const dropdownStyles: React.CSSProperties = { + display: 'flex', + justifyContent: 'space-between' +} + +const FileSizeDistribution: React.FC = ({ + volumeOptions = [], + volumeBucketMap, + fileCountResponse, + fileCountError +}) => { + + const [bucketOptions, setBucketOptions] = React.useState([]); + const [selectedBuckets, setSelectedBuckets] = React.useState([]); + const [selectedVolumes, setSelectedVolumes] = React.useState([]); + const [isBucketSelectionEnabled, setBucketSelectionEnabled] = React.useState(false); + + const [filePlotData, setFilePlotData] = React.useState({ + fileCountValues: [], + fileCountMap: new Map() + }); + + function handleVolumeChange(selectedVolumes: ValueType) { + + // Disable bucket selection options if more than one volume is selected or no volumes present + // If there is only one volume then the bucket selection is enabled + const bucketSelectionDisabled = ((selectedVolumes as Option[])?.length > 1 + && volumeBucketMap.size !== 1); + + let bucketOptions: Option[] = []; + + // Update buckets if only one volume is selected + if (selectedVolumes?.length === 1) { + const selectedVolume = selectedVolumes[0].value; + if (volumeBucketMap.has(selectedVolume)) { + bucketOptions = Array.from( + volumeBucketMap.get(selectedVolume)! + ).map(bucket => ({ + label: bucket, + value: bucket + })); + } + } + setBucketOptions([...bucketOptions]); + setSelectedVolumes(selectedVolumes as Option[]); + setSelectedBuckets([...bucketOptions]); + setBucketSelectionEnabled(!bucketSelectionDisabled); + } + + function handleBucketChange(selectedBuckets: ValueType) { + setSelectedBuckets(selectedBuckets as Option[]); + } + + function updatePlotData() { + // Aggregate count across volumes and buckets for use in plot + let filteredData = fileCountResponse; + const selectedVolumeValues = new Set(selectedVolumes.map(option => option.value)); + const selectedBucketValues = new Set(selectedBuckets.map(option => option.value)); + if (selectedVolumes.length >= 0) { + // Not all volumes are selected, need to filter based on the selected values + filteredData = filteredData.filter(data => selectedVolumeValues.has(data.volume)); + + // We have selected a volume but all the buckets are deselected + if (selectedVolumes.length === 1 && selectedBuckets.length === 0) { + // Since no buckets are selected there is no data + filteredData = []; + } + } + if (selectedBuckets.length > 0) { + // Not all buckcets are selected, filter based on the selected values + filteredData = filteredData.filter(data => selectedBucketValues.has(data.bucket)); + } + + // This is a map of 'size : count of the size' + const fileCountMap: Map = filteredData.reduce( + (map: Map, current) => { + const fileSize = current.fileSize; + const oldCount = map.get(fileSize) ?? 
0; + map.set(fileSize, oldCount + current.count); + return map; + }, + new Map + ); + + // Calculate the previous power of 2 to find the lower bound of the range + // Ex: for 2048, the lower bound is 1024 + const fileCountValues = Array.from(fileCountMap.keys()).map(value => { + const upperbound = size(value); + const upperboundPwr = Math.log2(value); + // For 1024 i.e 2^10, the lower bound is 0, so we start binning after 2^10 + const lowerbound = upperboundPwr > 10 ? size(2 ** (upperboundPwr - 1)) : size(0); + return `${lowerbound} - ${upperbound}`; + }); + + setFilePlotData({ + fileCountValues: fileCountValues, + // set the sorted value by size for the map + fileCountMap: new Map([...fileCountMap.entries()].sort((a, b) => a[0] - b[0])) + }); + } + + // If the response is updated or the volume-bucket data is updated, update plot + React.useEffect(() => { + updatePlotData(); + handleVolumeChange(volumeOptions); + }, [ + fileCountResponse, volumeBucketMap + ]); + + // If the selected volumes and buckets change, update plot + React.useEffect(() => { + updatePlotData(); + }, [selectedVolumes, selectedBuckets]) + + const { fileCountValues, fileCountMap } = filePlotData; + + const filePlotOptions: EChartsOption = { + xAxis: { + type: 'category', + data: [...fileCountValues] ?? [] + }, + yAxis: { + type: 'value' + }, + tooltip: { + trigger: 'item', + formatter: ({ name, value }) => { + return `Size Range: ${name}
    Count: ${value}` + } + }, + series: { + itemStyle: { + color: '#04AD78' + }, + data: Array.from(fileCountMap?.values() ?? []), + type: 'bar' + }, + graphic: (fileCountError) ? { + type: 'group', + left: 'center', + top: 'middle', + z: 100, + children: [ + { + type: 'rect', + left: 'center', + top: 'middle', + z: 100, + shape: { + width: 500, + height: 40 + }, + style: { + fill: '#FC909B' + } + }, + { + type: 'text', + left: 'center', + top: 'middle', + z: 100, + style: { + text: `No data available. ${fileCountError}`, + font: '20px sans-serif' + } + } + ] + } : undefined + } + + return (<> +
    + { }} + fixedColumn='' + columnLength={volumeOptions.length} + style={{ + control: (baseStyles, state) => ({ + ...baseStyles, + minWidth: 345 + }) + }} /> + { }} + fixedColumn='' + columnLength={bucketOptions.length} + isDisabled={!isBucketSelectionEnabled} + style={{ + control: (baseStyles, state) => ({ + ...baseStyles, + minWidth: 345 + }) + }} /> +
    + + ) +} + +export default FileSizeDistribution; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx index 21d4341787e..d320fd659a6 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx @@ -20,9 +20,11 @@ import React from 'react'; import { Input, Select } from 'antd'; import { Option } from '@/v2/components/select/singleSelect'; +import { DownOutlined } from '@ant-design/icons'; // ------------- Types -------------- // type SearchProps = { + disabled?: boolean; searchColumn?: string; searchInput: string; searchOptions?: Option[]; @@ -39,6 +41,7 @@ type SearchProps = { // ------------- Component -------------- // const Search: React.FC = ({ + disabled = false, searchColumn, searchInput = '', searchOptions = [], @@ -48,6 +51,8 @@ const Search: React.FC = ({ const selectFilter = searchColumn ? ( { placeholder: string; fixedColumn: string; columnLength: number; + style?: StylesConfig; onChange: (arg0: ValueType) => void; onTagClose: (arg0: string) => void; } // ------------- Component -------------- // + +const Option: React.FC> = (props) => { + return ( +
    + + null} /> + + +
    + ) +} + + const MultiSelect: React.FC = ({ options = [], selected = [], maxSelected = 5, placeholder = 'Columns', + isDisabled = false, fixedColumn, columnLength, tagRef, + style, onTagClose = () => { }, // Assign default value as a void function onChange = () => { }, // Assign default value as a void function ...props }) => { - const Option: React.FC> = (props) => { + const ValueContainer = ({ children, ...props }: ValueContainerProps) => { return ( -
    - - null} /> - - -
    - ) - } + + {React.Children.map(children, (child) => ( + ((child as React.ReactElement> + | React.ReactPortal)?.type as React.JSXElementConstructor)).name === "DummyInput" + ? child + : null + )} + {isDisabled + ? placeholder + : `${placeholder}: ${selected.length} selected` +} + + ); + }; + + const finalStyles = {...selectStyles, ...style ?? {}} return ( ) => { - if (selected?.length === options.length) return onChange!(options); - return onChange!(selected); - }} - styles={selectStyles} /> + {...props} + isMulti={true} + closeMenuOnSelect={false} + hideSelectedOptions={false} + isClearable={false} + isSearchable={false} + controlShouldRenderValue={false} + classNamePrefix='multi-select' + options={options} + components={{ + ValueContainer, + Option + }} + placeholder={placeholder} + value={selected} + isOptionDisabled={(option) => option.value === fixedColumn} + isDisabled={isDisabled} + onChange={(selected: ValueType) => { + if (selected?.length === options.length) return onChange!(options); + return onChange!(selected); + }} + styles={finalStyles} /> ) } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx index 41ab03f5982..1d02b407334 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx @@ -50,7 +50,7 @@ const SingleSelect: React.FC = ({ const ValueContainer = ({ children, ...props }: ValueContainerProps) => { - const selectedLimit = props.getValue() as Option[]; + const selectedValue = props.getValue() as Option[]; return ( {React.Children.map(children, (child) => ( @@ -60,7 +60,7 @@ const SingleSelect: React.FC = ({ ? child : null )} - Limit: {selectedLimit[0]?.label ?? ''} + {placeholder}: {selectedValue[0]?.label ?? ''} ); }; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.less new file mode 100644 index 00000000000..798287366c3 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.less @@ -0,0 +1,45 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +@progress-gray: #d0d0d0; +@progress-light-blue: rgb(230, 235, 248); +@progress-blue: #1890ff; +@progress-green: #52c41a; +@progress-red: #FFA39E; + +.storage-cell-container-v2 { + .capacity-bar-v2 { + font-size: 1em; + } +} + +.ozone-used-bg-v2 { + color: @progress-green !important; +} + +.non-ozone-used-bg-v2 { + color: @progress-blue !important; +} + +.remaining-bg-v2 { + color: @progress-light-blue !important; +} + +.committed-bg-v2 { + color: @progress-red !important; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx index 591b0088b04..fd6dd8dfe9b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx @@ -20,72 +20,73 @@ import React from 'react'; import { Progress } from 'antd'; import filesize from 'filesize'; import Icon from '@ant-design/icons'; -import { withRouter } from 'react-router-dom'; import Tooltip from 'antd/lib/tooltip'; import { FilledIcon } from '@/utils/themeIcons'; import { getCapacityPercent } from '@/utils/common'; import type { StorageReport } from '@/v2/types/overview.types'; +import './storageBar.less'; + const size = filesize.partial({ standard: 'iec', round: 1 }); type StorageReportProps = { - showMeta: boolean; + showMeta?: boolean; + strokeWidth?: number; } & StorageReport -const StorageBar = (props: StorageReportProps = { - capacity: 0, - used: 0, - remaining: 0, - committed: 0, - showMeta: true, +const StorageBar: React.FC = ({ + capacity = 0, + used = 0, + remaining = 0, + committed = 0, + showMeta = false, + strokeWidth = 3 }) => { - const { capacity, used, remaining, committed, showMeta } = props; const nonOzoneUsed = capacity - remaining - used; const totalUsed = capacity - remaining; const tooltip = ( <>
    - + Ozone Used ({size(used)})
    - + Non Ozone Used ({size(nonOzoneUsed)})
    - + Remaining ({size(remaining)})
    - + Container Pre-allocated ({size(committed)})
    ); - const metaElement = (showMeta) ? ( -
    - {size(used + nonOzoneUsed)} / {size(capacity)} -
    - ) : <>; - return ( -
    - - {metaElement} + + {(showMeta) && +
    + {size(used + nonOzoneUsed)} / {size(capacity)} +
    + } + className='capacity-bar-v2' strokeWidth={strokeWidth} />
    -
    ); } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx new file mode 100644 index 00000000000..0060177795b --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx @@ -0,0 +1,267 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +import moment from 'moment'; +import Table, { + ColumnProps, + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import Tag from 'antd/es/tag'; +import { + CheckCircleOutlined, + CloseCircleOutlined, + CloudServerOutlined, + FileUnknownOutlined, + HddOutlined, + LaptopOutlined, + SaveOutlined +} from '@ant-design/icons'; + +import QuotaBar from '@/components/quotaBar/quotaBar'; +import { nullAwareLocaleCompare } from '@/utils/common'; +import { + Bucket, + BucketLayout, + BucketLayoutTypeList, + BucketsTableProps, + BucketStorage, + BucketStorageTypeList +} from '@/v2/types/bucket.types'; + +function renderIsVersionEnabled(isVersionEnabled: boolean) { + return isVersionEnabled + ? + : +}; + +function renderStorageType(bucketStorage: BucketStorage) { + const bucketStorageIconMap: Record = { + RAM_DISK: , + SSD: , + DISK: , + ARCHIVE: + }; + const icon = bucketStorage in bucketStorageIconMap + ? bucketStorageIconMap[bucketStorage] + : ; + return {icon} {bucketStorage}; +}; + +function renderBucketLayout(bucketLayout: BucketLayout) { + const bucketLayoutColorMap = { + FILE_SYSTEM_OPTIMIZED: 'green', + OBJECT_STORE: 'orange', + LEGACY: 'blue' + }; + const color = bucketLayout in bucketLayoutColorMap ? 
+ bucketLayoutColorMap[bucketLayout] : ''; + return {bucketLayout}; +}; + +export const COLUMNS: ColumnsType = [ + { + title: 'Bucket', + dataIndex: 'name', + key: 'name', + sorter: (a: Bucket, b: Bucket) => a.name.localeCompare(b.name), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Volume', + dataIndex: 'volumeName', + key: 'volumeName', + sorter: (a: Bucket, b: Bucket) => a.volumeName.localeCompare(b.volumeName), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Owner', + dataIndex: 'owner', + key: 'owner', + sorter: (a: Bucket, b: Bucket) => nullAwareLocaleCompare(a.owner, b.owner) + }, + { + title: 'Versioning', + dataIndex: 'versioning', + key: 'isVersionEnabled', + render: (isVersionEnabled: boolean) => renderIsVersionEnabled(isVersionEnabled) + }, + { + title: 'Storage Type', + dataIndex: 'storageType', + key: 'storageType', + filterMultiple: true, + filters: BucketStorageTypeList.map(state => ({ text: state, value: state })), + onFilter: (value, record: Bucket) => record.storageType === value, + sorter: (a: Bucket, b: Bucket) => a.storageType.localeCompare(b.storageType), + render: (storageType: BucketStorage) => renderStorageType(storageType) + }, + { + title: 'Bucket Layout', + dataIndex: 'bucketLayout', + key: 'bucketLayout', + filterMultiple: true, + filters: BucketLayoutTypeList.map(state => ({ text: state, value: state })), + onFilter: (value, record: Bucket) => record.bucketLayout === value, + sorter: (a: Bucket, b: Bucket) => a.bucketLayout.localeCompare(b.bucketLayout), + render: (bucketLayout: BucketLayout) => renderBucketLayout(bucketLayout) + }, + { + title: 'Creation Time', + dataIndex: 'creationTime', + key: 'creationTime', + sorter: (a: Bucket, b: Bucket) => a.creationTime - b.creationTime, + render: (creationTime: number) => { + return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Modification Time', + dataIndex: 'modificationTime', + key: 'modificationTime', + sorter: (a: Bucket, b: Bucket) => a.modificationTime - b.modificationTime, + render: (modificationTime: number) => { + return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Storage Capacity', + key: 'quotaCapacityBytes', + sorter: (a: Bucket, b: Bucket) => a.usedBytes - b.usedBytes, + render: (text: string, record: Bucket) => ( + + ) + }, + { + title: 'Namespace Capacity', + key: 'namespaceCapacity', + sorter: (a: Bucket, b: Bucket) => a.usedNamespace - b.usedNamespace, + render: (text: string, record: Bucket) => ( + + ) + }, + { + title: 'Source Volume', + dataIndex: 'sourceVolume', + key: 'sourceVolume', + render: (sourceVolume: string) => { + return sourceVolume ? sourceVolume : 'NA'; + } + }, + { + title: 'Source Bucket', + dataIndex: 'sourceBucket', + key: 'sourceBucket', + render: (sourceBucket: string) => { + return sourceBucket ? 
sourceBucket : 'NA'; + } + } +]; + +const BucketsTable: React.FC = ({ + loading = false, + data, + handleAclClick, + selectedColumns, + searchColumn = 'name', + searchTerm = '' +}) => { + + React.useEffect(() => { + const aclColumn: ColumnProps = { + title: 'ACLs', + dataIndex: 'acls', + key: 'acls', + render: (_: any, record: Bucket) => { + return ( + { + handleAclClick(record); + }} + > + Show ACL + + ); + } + }; + + if (COLUMNS.length > 0 && COLUMNS[COLUMNS.length - 1].key !== 'acls') { + // Push the ACL column for initial load + COLUMNS.push(aclColumn); + selectedColumns.push({ + label: aclColumn.title as string, + value: aclColumn.key as string + }); + } else { + // Replace old ACL column with new ACL column with correct reference + // e.g. After page is reloaded / redirect from other page + COLUMNS[COLUMNS.length - 1] = aclColumn; + selectedColumns[selectedColumns.length - 1] = { + label: aclColumn.title as string, + value: aclColumn.key as string + } + } + }, []); + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ) + } + + function getFilteredData(data: Bucket[]) { + return data.filter( + (bucket: Bucket) => bucket[searchColumn].includes(searchTerm) + ); + } + + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} buckets`, + showSizeChanger: true + }; + + return ( +
    + `${record.volumeName}/${record.name}`} + pagination={paginationConfig} + scroll={{ x: 'max-content', scrollToFirstRowOnChange: true }} + locale={{ filterTitle: '' }} + /> + + ) +} + +export default BucketsTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/containersTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/containersTable.tsx new file mode 100644 index 00000000000..1bb1b5456b5 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/containersTable.tsx @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React, { useRef } from 'react'; +import filesize from 'filesize'; +import { AxiosError } from 'axios'; +import { Popover, Table } from 'antd'; +import { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { NodeIndexOutlined } from '@ant-design/icons'; + +import { getFormattedTime } from '@/v2/utils/momentUtils'; +import { showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { + Container, ContainerKeysResponse, ContainerReplica, + ContainerTableProps, + ExpandedRowState, KeyResponse +} from '@/v2/types/container.types'; + +const size = filesize.partial({ standard: 'iec' }); + +export const COLUMNS: ColumnsType = [ + { + title: 'Container ID', + dataIndex: 'containerID', + key: 'containerID', + sorter: (a: Container, b: Container) => a.containerID - b.containerID + }, + { + title: 'No. of Keys', + dataIndex: 'keys', + key: 'keys', + sorter: (a: Container, b: Container) => a.keys - b.keys + }, + { + title: 'Actual/Expected Replica(s)', + dataIndex: 'expectedReplicaCount', + key: 'expectedReplicaCount', + render: (expectedReplicaCount: number, record: Container) => { + const actualReplicaCount = record.actualReplicaCount; + return ( + + {actualReplicaCount} / {expectedReplicaCount} + + ); + } + }, + { + title: 'Datanodes', + dataIndex: 'replicas', + key: 'replicas', + render: (replicas: ContainerReplica[]) => { + const renderDatanodes = (replicas: ContainerReplica[]) => { + return replicas?.map((replica: any, idx: number) => ( +
    + {replica.datanodeHost} +
    + )) + } + + return ( + + {replicas.length} datanodes + + ) + } + }, + { + title: 'Pipeline ID', + dataIndex: 'pipelineID', + key: 'pipelineID' + }, + { + title: 'Unhealthy Since', + dataIndex: 'unhealthySince', + key: 'unhealthySince', + render: (unhealthySince: number) => getFormattedTime(unhealthySince, 'lll'), + sorter: (a: Container, b: Container) => a.unhealthySince - b.unhealthySince + } +]; + +const KEY_TABLE_COLUMNS: ColumnsType = [ + { + title: 'Volume', + dataIndex: 'Volume', + key: 'Volume' + }, + { + title: 'Bucket', + dataIndex: 'Bucket', + key: 'Bucket' + }, + { + title: 'Key', + dataIndex: 'Key', + key: 'Key' + }, + { + title: 'Size', + dataIndex: 'DataSize', + key: 'DataSize', + render: (dataSize: number) =>
    {size(dataSize)}
    + }, + { + title: 'Date Created', + dataIndex: 'CreationTime', + key: 'CreationTime', + render: (date: string) => getFormattedTime(date, 'lll') + }, + { + title: 'Date Modified', + dataIndex: 'ModificationTime', + key: 'ModificationTime', + render: (date: string) => getFormattedTime(date, 'lll') + }, + { + title: 'Path', + dataIndex: 'CompletePath', + key: 'path' + } +]; + +const ContainerTable: React.FC = ({ + data, + loading, + selectedColumns, + expandedRow, + expandedRowSetter, + searchColumn = 'containerID', + searchTerm = '' +}) => { + + const cancelSignal = useRef(); + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ); + } + + function loadRowData(containerID: number) { + const { request, controller } = AxiosGetHelper( + `/api/v1/containers/${containerID}/keys`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + const containerKeysResponse: ContainerKeysResponse = response.data; + expandedRowSetter({ + ...expandedRow, + [containerID]: { + ...expandedRow[containerID], + loading: false, + dataSource: containerKeysResponse.keys, + totalCount: containerKeysResponse.totalCount + } + }); + }).catch(error => { + expandedRowSetter({ + ...expandedRow, + [containerID]: { + ...expandedRow[containerID], + loading: false + } + }); + showDataFetchError((error as AxiosError).toString()); + }); + } + + function getFilteredData(data: Container[]) { + + return data?.filter( + (container: Container) => { + return (searchColumn === 'containerID') + ? container[searchColumn].toString().includes(searchTerm) + : container[searchColumn].includes(searchTerm) + } + ) ?? []; + } + + function onRowExpandClick(expanded: boolean, record: Container) { + if (expanded) { + loadRowData(record.containerID); + } + else { + cancelSignal.current && cancelSignal.current.abort(); + } + } + + function expandedRowRender(record: Container) { + const containerId = record.containerID + const containerKeys: ExpandedRowState = expandedRow[containerId]; + const dataSource = containerKeys?.dataSource ?? []; + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} Keys` + } + + return ( +
    `${record.Volume}/${record.Bucket}/${record.Key}`} + locale={{ filterTitle: '' }} /> + ) + }; + + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => ( + `${range[0]}-${range[1]} of ${total} Containers` + ), + showSizeChanger: true + }; + + return ( +
    +
    + + ); +} + +export default ContainerTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx new file mode 100644 index 00000000000..494d898509b --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx @@ -0,0 +1,314 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import moment from 'moment'; +import { Popover, Tooltip } from 'antd' +import { + CheckCircleFilled, + CloseCircleFilled, + HourglassFilled, + InfoCircleOutlined, + WarningFilled +} from '@ant-design/icons'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { TableRowSelection } from 'antd/es/table/interface'; + +import StorageBar from '@/v2/components/storageBar/storageBar'; +import DecommissionSummary from '@/v2/components/decommissioningSummary/decommissioningSummary'; + +import { ReplicationIcon } from '@/utils/themeIcons'; +import { getTimeDiffFromTimestamp } from '@/v2/utils/momentUtils'; + +import { + Datanode, + DatanodeOpState, + DatanodeOpStateList, + DatanodeState, + DatanodeStateList, + DatanodeTableProps +} from '@/v2/types/datanode.types'; +import { Pipeline } from '@/v2/types/pipelines.types'; + + +let decommissioningUuids: string | string[] = []; + +const headerIconStyles: React.CSSProperties = { + display: 'flex', + alignItems: 'center' +} + +const renderDatanodeState = (state: DatanodeState) => { + const stateIconMap = { + HEALTHY: , + STALE: , + DEAD: + }; + const icon = state in stateIconMap ? stateIconMap[state] : ''; + return {icon} {state}; +}; + +const renderDatanodeOpState = (opState: DatanodeOpState) => { + const opStateIconMap = { + IN_SERVICE: , + DECOMMISSIONING: , + DECOMMISSIONED: , + ENTERING_MAINTENANCE: , + IN_MAINTENANCE: + }; + const icon = opState in opStateIconMap ? 
opStateIconMap[opState] : ''; + return {icon} {opState}; +}; + +export const COLUMNS: ColumnsType = [ + { + title: 'Hostname', + dataIndex: 'hostname', + key: 'hostname', + sorter: (a: Datanode, b: Datanode) => a.hostname.localeCompare( + b.hostname, undefined, { numeric: true } + ), + defaultSortOrder: 'ascend' as const + }, + { + title: 'State', + dataIndex: 'state', + key: 'state', + filterMultiple: true, + filters: DatanodeStateList.map(state => ({ text: state, value: state })), + onFilter: (value, record: Datanode) => record.state === value, + render: (text: DatanodeState) => renderDatanodeState(text), + sorter: (a: Datanode, b: Datanode) => a.state.localeCompare(b.state) + }, + { + title: 'Operational State', + dataIndex: 'opState', + key: 'opState', + filterMultiple: true, + filters: DatanodeOpStateList.map(state => ({ text: state, value: state })), + onFilter: (value, record: Datanode) => record.opState === value, + render: (text: DatanodeOpState) => renderDatanodeOpState(text), + sorter: (a: Datanode, b: Datanode) => a.opState.localeCompare(b.opState) + }, + { + title: 'UUID', + dataIndex: 'uuid', + key: 'uuid', + sorter: (a: Datanode, b: Datanode) => a.uuid.localeCompare(b.uuid), + defaultSortOrder: 'ascend' as const, + render: (uuid: string, record: Datanode) => { + return ( + //1. Compare Decommission Api's UUID with all UUID in table and show Decommission Summary + (decommissioningUuids && decommissioningUuids.includes(record.uuid) && record.opState !== 'DECOMMISSIONED') ? + : {uuid} + ); + } + }, + { + title: 'Storage Capacity', + dataIndex: 'storageUsed', + key: 'storageUsed', + sorter: (a: Datanode, b: Datanode) => a.storageRemaining - b.storageRemaining, + render: (_: string, record: Datanode) => ( + + ) + }, + { + title: 'Last Heartbeat', + dataIndex: 'lastHeartbeat', + key: 'lastHeartbeat', + sorter: (a: Datanode, b: Datanode) => moment(a.lastHeartbeat).unix() - moment(b.lastHeartbeat).unix(), + render: (heartbeat: number) => { + return heartbeat > 0 ? getTimeDiffFromTimestamp(heartbeat) : 'NA'; + } + }, + { + title: 'Pipeline ID(s)', + dataIndex: 'pipelines', + key: 'pipelines', + render: (pipelines: Pipeline[], record: Datanode) => { + const renderPipelineIds = (pipelineIds: Pipeline[]) => { + return pipelineIds?.map((pipeline: any, index: any) => ( +
    + + {pipeline.pipelineID} +
    + )) + } + + return ( + + {pipelines.length} pipelines + + ); + } + }, + { + title: () => ( + + Leader Count + + + + + ), + dataIndex: 'leaderCount', + key: 'leaderCount', + sorter: (a: Datanode, b: Datanode) => a.leaderCount - b.leaderCount + }, + { + title: 'Containers', + dataIndex: 'containers', + key: 'containers', + sorter: (a: Datanode, b: Datanode) => a.containers - b.containers + }, + { + title: () => ( + + Open Container + + + + + ), + dataIndex: 'openContainers', + key: 'openContainers', + sorter: (a: Datanode, b: Datanode) => a.openContainers - b.openContainers + }, + { + title: 'Version', + dataIndex: 'version', + key: 'version', + sorter: (a: Datanode, b: Datanode) => a.version.localeCompare(b.version), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Setup Time', + dataIndex: 'setupTime', + key: 'setupTime', + sorter: (a: Datanode, b: Datanode) => a.setupTime - b.setupTime, + render: (uptime: number) => { + return uptime > 0 ? moment(uptime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Revision', + dataIndex: 'revision', + key: 'revision', + sorter: (a: Datanode, b: Datanode) => a.revision.localeCompare(b.revision), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Build Date', + dataIndex: 'buildDate', + key: 'buildDate', + sorter: (a: Datanode, b: Datanode) => a.buildDate.localeCompare(b.buildDate), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Network Location', + dataIndex: 'networkLocation', + key: 'networkLocation', + sorter: (a: Datanode, b: Datanode) => a.networkLocation.localeCompare(b.networkLocation), + defaultSortOrder: 'ascend' as const + } +]; + +const DatanodesTable: React.FC = ({ + data, + handleSelectionChange, + decommissionUuids, + selectedColumns, + loading = false, + selectedRows = [], + searchColumn = 'hostname', + searchTerm = '' +}) => { + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ); + } + + function getFilteredData(data: Datanode[]) { + return data?.filter( + (datanode: Datanode) => datanode[searchColumn].includes(searchTerm) + ) ?? []; + } + + function isSelectable(record: Datanode) { + // Disable checkbox for any datanode which is not DEAD to prevent removal + return record.state !== 'DEAD' && true; + } + + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => ( + `${range[0]}-${range[1]} of ${total} Datanodes` + ), + showSizeChanger: true + }; + + const rowSelection: TableRowSelection = { + selectedRowKeys: selectedRows, + onChange: (rows: React.Key[]) => { handleSelectionChange(rows) }, + getCheckboxProps: (record: Datanode) => ({ + disabled: isSelectable(record) + }), + }; + + React.useEffect(() => { + decommissioningUuids = decommissionUuids; + }, [decommissionUuids]) + + return ( +
    +
    + + ); +} + +export default DatanodesTable; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/containerMismatchTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/containerMismatchTable.tsx new file mode 100644 index 00000000000..818eca37f8e --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/containerMismatchTable.tsx @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AxiosError } from 'axios'; +import { + Dropdown, + Menu, + Popover, + Table, + Tooltip +} from 'antd'; +import { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { + MenuProps as FilterMenuProps +} from 'antd/es/menu'; +import { FilterFilled, InfoCircleOutlined } from '@ant-design/icons'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import { showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { + Container, + MismatchContainersResponse, + Pipelines +} from '@/v2/types/insights.types'; + + +//-----Types----- +type ContainerMismatchTableProps = { + paginationConfig: TablePaginationConfig; + limit: Option; + handleLimitChange: (arg0: ValueType) => void; + expandedRowRender: (arg0: any) => JSX.Element; + onRowExpand: (arg0: boolean, arg1: any) => void; +} + +//-----Components------ +const ContainerMismatchTable: React.FC = ({ + paginationConfig, + limit, + onRowExpand, + expandedRowRender, + handleLimitChange +}) => { + + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + const handleExistAtChange: FilterMenuProps['onClick'] = ({ key }) => { + if (key === 'OM') { + fetchMismatchContainers('SCM'); + } else { + fetchMismatchContainers('OM'); + } + } + + function filterData(data: Container[] | undefined) { + return data?.filter( + (data: Container) => data.containerId.toString().includes(debouncedSearch) + ); + } + + const COLUMNS: ColumnsType = [ + { + title: 'Container ID', + dataIndex: 'containerId', + key: 'containerId', + width: '20%' + + }, + { + title: 'Count Of Keys', + dataIndex: 'numberOfKeys', + key: 'numberOfKeys', + sorter: (a: Container, b: Container) => a.numberOfKeys - 
b.numberOfKeys + }, + { + title: 'Pipelines', + dataIndex: 'pipelines', + key: 'pipelines', + render: (pipelines: Pipelines[]) => { + const renderPipelineIds = (pipelineIds: Pipelines[]) => { + return pipelineIds?.map(pipeline => ( +
    + {pipeline.id.id} +
    + )); + } + return ( + + {pipelines.length} pipelines + + ) + } + }, + { + title: <> + + OM + SCM + + }> + + + + SCM: Container exists at SCM but missing at OM.
+ OM: Container exists at OM but missing at SCM. + }> + +
    + , + dataIndex: 'existsAt' + } + ]; + + function fetchMismatchContainers(missingIn: string) { + setLoading(true); + const { request, controller } = AxiosGetHelper( + `/api/v1/containers/mismatch?limit=${limit.value}&missingIn=${missingIn}`, + cancelSignal.current + ); + + cancelSignal.current = controller; + request.then(response => { + const mismatchedContainers: MismatchContainersResponse = response?.data; + setData(mismatchedContainers?.containerDiscrepancyInfo ?? []); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }) + } + + React.useEffect(() => { + //Fetch containers missing in OM by default + fetchMismatchContainers('OM'); + + return (() => { + cancelSignal.current && cancelSignal.current.abort(); + }) + }, [limit.value]); + + return ( + <> +
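+ {/* Choosing OM or SCM from the existsAt header re-queries /api/v1/containers/mismatch with the opposite missingIn value (exists at OM => missingIn=SCM) */}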
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + + ) +} + +export default ContainerMismatchTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingDirsTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingDirsTable.tsx new file mode 100644 index 00000000000..f0c6fc8161e --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingDirsTable.tsx @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AxiosError } from 'axios'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { byteToSize, showDataFetchError } from '@/utils/common'; +import { getFormattedTime } from '@/v2/utils/momentUtils'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { DeletedDirInfo } from '@/v2/types/insights.types'; + +//-----Types------ +type DeletePendingDirTableProps = { + paginationConfig: TablePaginationConfig + limit: Option; + handleLimitChange: (arg0: ValueType) => void; +} + +//-----Constants------ +const COLUMNS: ColumnsType = [{ + title: 'Directory Name', + dataIndex: 'key', + key: 'key' +}, +{ + title: 'In state since', + dataIndex: 'inStateSince', + key: 'inStateSince', + render: (inStateSince: number) => { + return getFormattedTime(inStateSince, 'll LTS'); + } +}, +{ + title: 'Path', + dataIndex: 'path', + key: 'path' +}, +{ + title: 'Size', + dataIndex: 'size', + key: 'size', + render: (dataSize: number) => byteToSize(dataSize, 1) +}]; + +//-----Components------ +const DeletePendingDirTable: React.FC = ({ + limit, + paginationConfig, + handleLimitChange +}) => { + + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + function filterData(data: DeletedDirInfo[] | undefined) { + return data?.filter( + (data: DeletedDirInfo) => data.key.includes(debouncedSearch) + ); + } + + function loadData() { + setLoading(true); + + const { request, controller } = AxiosGetHelper( + `/api/v1/keys/deletePending/dirs?limit=${limit.value}`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + 
setData(response?.data?.deletedDirInfo ?? []); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }); + } + + React.useEffect(() => { + loadData(); + + return (() => cancelSignal.current && cancelSignal.current.abort()); + }, [limit.value]); + + return (<> +
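+ {/* The limit selector drives the server-side query; the search box filters the fetched directories client-side by name, debounced by 300 ms */}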
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + ) +} + +export default DeletePendingDirTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingKeysTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingKeysTable.tsx new file mode 100644 index 00000000000..65ada495641 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingKeysTable.tsx @@ -0,0 +1,194 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +import React from 'react'; +import { AxiosError } from 'axios'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import ExpandedPendingKeysTable from '@/v2/components/tables/insights/expandedPendingKeysTable'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { byteToSize, showDataFetchError } from '@/utils/common'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { + DeletePendingKey, + DeletePendingKeysResponse +} from '@/v2/types/insights.types'; + +//-----Types------ +type DeletePendingKeysTableProps = { + paginationConfig: TablePaginationConfig + limit: Option; + handleLimitChange: (arg0: ValueType) => void; +} + +type DeletePendingKeysColumns = { + fileName: string; + keyName: string; + dataSize: number; + keyCount: number; +} + +type ExpandedDeletePendingKeys = { + omKeyInfoList: DeletePendingKey[] +} + +//------Constants------ +const COLUMNS: ColumnsType = [ + { + title: 'Key Name', + dataIndex: 'fileName', + key: 'fileName' + }, + { + title: 'Path', + dataIndex: 'keyName', + key: 'keyName', + }, + { + title: 'Total Data Size', + dataIndex: 'dataSize', + key: 'dataSize', + render: (dataSize: number) => byteToSize(dataSize, 1) + }, + { + title: 'Total Key Count', + dataIndex: 'keyCount', + key: 'keyCount', + } +]; + +let expandedDeletePendingKeys: ExpandedDeletePendingKeys[] = []; + +//-----Components------ +const DeletePendingKeysTable: React.FC = ({ + paginationConfig, + limit, + handleLimitChange +}) => { + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + function filterData(data: DeletePendingKeysColumns[] | undefined) { + return data?.filter( + (data: DeletePendingKeysColumns) => data.keyName.includes(debouncedSearch) + 
); + } + + function expandedRowRender(record: DeletePendingKeysColumns) { + const filteredData = expandedDeletePendingKeys?.flatMap((info) => ( + info.omKeyInfoList?.filter((key) => key.keyName === record.keyName) + )); + return ( + + ) + } + + function fetchDeletePendingKeys() { + setLoading(true); + const { request, controller } = AxiosGetHelper( + `/api/v1/keys/deletePending?limit=${limit.value}`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + const deletePendingKeys: DeletePendingKeysResponse = response?.data; + let deletedKeyData = []; + // Sum up the data size and organize related key information + deletedKeyData = deletePendingKeys?.deletedKeyInfo?.flatMap((keyInfo) => { + expandedDeletePendingKeys.push(keyInfo); + let count = 0; + let item: DeletePendingKey = keyInfo.omKeyInfoList?.reduce((obj, curr) => { + count += 1; + return { ...curr, dataSize: obj.dataSize + curr.dataSize }; + }, { ...keyInfo.omKeyInfoList[0], dataSize: 0 }); + + return { + dataSize: item.dataSize, + fileName: item.fileName, + keyName: item.keyName, + path: item.path, + keyCount: count + } + }); + setData(deletedKeyData); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }) + } + + React.useEffect(() => { + fetchDeletePendingKeys(); + expandedDeletePendingKeys = []; + + return (() => { + cancelSignal.current && cancelSignal.current.abort(); + }) + }, [limit.value]); + + return ( + <> +
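+ {/* Each row aggregates dataSize and key count across its omKeyInfoList; expanding a row lists the individual pending keys */}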
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + + ) +} + +export default DeletePendingKeysTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletedContainerKeysTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletedContainerKeysTable.tsx new file mode 100644 index 00000000000..9aaf62a63d6 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletedContainerKeysTable.tsx @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AxiosError } from 'axios'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { showDataFetchError } from '@/utils/common'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { + Container, + DeletedContainerKeysResponse, + Pipelines +} from '@/v2/types/insights.types'; + +//------Types------- +type DeletedContainerKeysTableProps = { + paginationConfig: TablePaginationConfig; + limit: Option; + handleLimitChange: (arg0: ValueType) => void; + onRowExpand: (arg0: boolean, arg1: any) => void; + expandedRowRender: (arg0: any) => JSX.Element; +} + +//------Constants------ +const COLUMNS: ColumnsType = [ + { + title: 'Container ID', + dataIndex: 'containerId', + key: 'containerId', + width: '20%' + }, + { + title: 'Count Of Keys', + dataIndex: 'numberOfKeys', + key: 'numberOfKeys', + sorter: (a: Container, b: Container) => a.numberOfKeys - b.numberOfKeys + }, + { + title: 'Pipelines', + dataIndex: 'pipelines', + key: 'pipelines', + render: (pipelines: Pipelines[]) => ( +
    + {pipelines && pipelines.map((pipeline: any) => ( +
    + {pipeline.id.id} +
    + ))} +
    + ) + } +]; + +//-----Components------ +const DeletedContainerKeysTable: React.FC = ({ + limit, + paginationConfig, + handleLimitChange, + onRowExpand, + expandedRowRender +}) => { + + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + function filterData(data: Container[] | undefined) { + return data?.filter( + (data: Container) => data.containerId.toString().includes(debouncedSearch) + ); + } + + function fetchDeletedKeys() { + const { request, controller } = AxiosGetHelper( + `/api/v1/containers/mismatch/deleted?limit=${limit.value}`, + cancelSignal.current + ) + cancelSignal.current = controller; + + request.then(response => { + setLoading(true); + const deletedContainerKeys: DeletedContainerKeysResponse = response?.data; + setData(deletedContainerKeys?.containers ?? []); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }); + } + + React.useEffect(() => { + fetchDeletedKeys(); + + return (() => { + cancelSignal.current && cancelSignal.current.abort(); + }) + }, [limit.value]); + + + return ( + <> +
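+ {/* Rows come from /api/v1/containers/mismatch/deleted and are filtered client-side by container ID */}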
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + + ) +} + +export default DeletedContainerKeysTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedKeyTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedKeyTable.tsx new file mode 100644 index 00000000000..8b54937e473 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedKeyTable.tsx @@ -0,0 +1,93 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +import React from 'react'; +import moment from 'moment'; +import filesize from 'filesize'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; + +import { MismatchKeys } from '@/v2/types/insights.types'; + + +const size = filesize.partial({ standard: 'iec' }); + +//-----Types------ +type ExpandedKeyTableProps = { + loading: boolean; + data: MismatchKeys[]; + paginationConfig: TablePaginationConfig; +} + +//-----Constants----- +const COLUMNS: ColumnsType = [ + { + title: 'Volume', + dataIndex: 'Volume', + key: 'Volume' + }, + { + title: 'Bucket', + dataIndex: 'Bucket', + key: 'Bucket' + }, + { + title: 'Key', + dataIndex: 'Key', + key: 'Key' + }, + { + title: 'Size', + dataIndex: 'DataSize', + key: 'DataSize', + render: (dataSize: number) =>
    {size(dataSize)}
    + }, + { + title: 'Date Created', + dataIndex: 'CreationTime', + key: 'CreationTime', + render: (date: string) => moment(date).format('lll') + }, + { + title: 'Date Modified', + dataIndex: 'ModificationTime', + key: 'ModificationTime', + render: (date: string) => moment(date).format('lll') + } +]; + +//-----Components------ +const ExpandedKeyTable: React.FC = ({ + loading, + data, + paginationConfig +}) => { + return ( +
    + ) +} + +export default ExpandedKeyTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedPendingKeysTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedPendingKeysTable.tsx new file mode 100644 index 00000000000..accb390303b --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedPendingKeysTable.tsx @@ -0,0 +1,81 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +import React from 'react'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; + +import { byteToSize } from '@/utils/common'; +import { getFormattedTime } from '@/v2/utils/momentUtils'; + +import { DeletePendingKey } from '@/v2/types/insights.types'; + +//--------Types-------- +type ExpandedPendingKeysTableProps = { + data: DeletePendingKey[]; + paginationConfig: TablePaginationConfig; +} + +//--------Constants-------- +const COLUMNS: ColumnsType = [{ + title: 'Data Size', + dataIndex: 'dataSize', + key: 'dataSize', + render: (dataSize: any) => dataSize = dataSize > 0 ? byteToSize(dataSize, 1) : dataSize +}, +{ + title: 'Replicated Data Size', + dataIndex: 'replicatedSize', + key: 'replicatedSize', + render: (replicatedSize: any) => replicatedSize = replicatedSize > 0 ? byteToSize(replicatedSize, 1) : replicatedSize +}, +{ + title: 'Creation Time', + dataIndex: 'creationTime', + key: 'creationTime', + render: (creationTime: number) => { + return getFormattedTime(creationTime, 'll LTS'); + } +}, +{ + title: 'Modification Time', + dataIndex: 'modificationTime', + key: 'modificationTime', + render: (modificationTime: number) => { + return getFormattedTime(modificationTime, 'll LTS'); + } +}] + +//--------Component-------- +const ExpandedPendingKeysTable: React.FC = ({ + data, + paginationConfig +}) => { + return ( +
    + ) +} + +export default ExpandedPendingKeysTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/openKeysTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/openKeysTable.tsx new file mode 100644 index 00000000000..02c73c77528 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/openKeysTable.tsx @@ -0,0 +1,213 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +import React from 'react'; +import { AxiosError } from 'axios'; +import { + Dropdown, + Menu, + Table +} from 'antd'; +import { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { MenuProps } from 'antd/es/menu'; +import { FilterFilled } from '@ant-design/icons'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { byteToSize, showDataFetchError } from '@/utils/common'; +import { getFormattedTime } from '@/v2/utils/momentUtils'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { OpenKeys, OpenKeysResponse } from '@/v2/types/insights.types'; + + +//--------Types-------- +type OpenKeysTableProps = { + limit: Option; + paginationConfig: TablePaginationConfig; + handleLimitChange: (arg0: ValueType) => void; +} + +//-----Components------ +const OpenKeysTable: React.FC = ({ + limit, + paginationConfig, + handleLimitChange +}) => { + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + function filterData(data: OpenKeys[] | undefined) { + return data?.filter( + (data: OpenKeys) => data.path.includes(debouncedSearch) + ); + } + + function fetchOpenKeys(isFso: boolean) { + setLoading(true); + + const { request, controller } = AxiosGetHelper( + `/api/v1/keys/open?includeFso=${isFso}&includeNonFso=${!isFso}&limit=${limit.value}`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + const openKeys: OpenKeysResponse = response?.data ?? { 'fso': [] }; + let allOpenKeys: OpenKeys[]; + if (isFso) { + allOpenKeys = openKeys['fso']?.map((key: OpenKeys) => ({ + ...key, + type: 'FSO' + })) ?? []; + } else { + allOpenKeys = openKeys['nonFSO']?.map((key: OpenKeys) => ({ + ...key, + type: 'Non FSO' + })) ?? 
[]; + } + + setData(allOpenKeys); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }); + } + + const handleKeyTypeChange: MenuProps['onClick'] = (e) => { + if (e.key === 'fso') { + fetchOpenKeys(true); + } else { + fetchOpenKeys(false); + } + } + + const COLUMNS: ColumnsType = [{ + title: 'Key Name', + dataIndex: 'path', + key: 'path' + }, + { + title: 'Size', + dataIndex: 'size', + key: 'size', + render: (size: any) => size = byteToSize(size, 1) + }, + { + title: 'Path', + dataIndex: 'key', + key: 'key', + width: '270px' + }, + { + title: 'In state since', + dataIndex: 'inStateSince', + key: 'inStateSince', + render: (inStateSince: number) => { + return getFormattedTime(inStateSince, 'll LTS'); + } + }, + { + title: 'Replication Factor', + dataIndex: 'replicationInfo', + key: 'replicationfactor', + render: (replicationInfo: any) => ( +
    + {Object.values(replicationInfo)[0]} +
    + ) + }, + { + title: 'Replication Type', + dataIndex: 'replicationInfo', + key: 'replicationtype', + render: (replicationInfo: any) => ( +
    + { +
    + {Object.values(replicationInfo)[2]} +
    + } +
    + ) + }, { + title: <> + + FSO + Non-FSO + + }> + + + , + dataIndex: 'type', + key: 'type', + render: (type: string) =>
    {type}
    + }]; + + React.useEffect(() => { + // Fetch FSO open keys by default + fetchOpenKeys(true); + + return (() => cancelSignal.current && cancelSignal.current.abort()); + }, [limit.value]); + + return ( + <> +
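+ {/* FSO open keys are fetched by default; the Type column dropdown re-fetches with includeFso/includeNonFso flipped */}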
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + + ); +} + +export default OpenKeysTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx new file mode 100644 index 00000000000..6c07749436d --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/pipelinesTable.tsx @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import Tooltip from 'antd/es/tooltip'; +import { InfoCircleOutlined } from '@ant-design/icons'; + +import { ReplicationIcon } from '@/utils/themeIcons'; +import { getDurationFromTimestamp, getTimeDiffFromTimestamp } from '@/v2/utils/momentUtils'; +import { Pipeline, PipelinesTableProps, PipelineStatusList } from '@/v2/types/pipelines.types'; + + +// TODO: When Datanodes PR gets merged remove these declarations +// And import from datanodes.types + +type SummaryDatanodeDetails = { + level: number; + parent: unknown | null; + cost: number; + uuid: string; + uuidString: string; + ipAddress: string; + hostName: string; + ports: { + name: string; + value: number + }[]; + certSerialId: null, + version: string | null; + setupTime: number; + revision: string | null; + buildDate: string; + persistedOpState: string; + persistedOpStateExpiryEpochSec: number; + initialVersion: number; + currentVersion: number; + signature: number; + decommissioned: boolean; + networkName: string; + networkLocation: string; + networkFullPath: string; + numOfLeaves: number; +} + +export const COLUMNS: ColumnsType = [ + { + title: 'Pipeline ID', + dataIndex: 'pipelineId', + key: 'pipelineId', + sorter: (a: Pipeline, b: Pipeline) => a.pipelineId.localeCompare(b.pipelineId), + + }, + { + title: 'Replication Type & Factor', + dataIndex: 'replicationType', + key: 'replicationType', + render: (replicationType: string, record: Pipeline) => { + const replicationFactor = record.replicationFactor; + return ( + + + {replicationType} ({replicationFactor}) + + ); + }, + sorter: (a: Pipeline, b: Pipeline) => + (a.replicationType + a.replicationFactor.toString()).localeCompare(b.replicationType + b.replicationFactor.toString()), + defaultSortOrder: 'descend' as const + }, + { + title: 'Status', + dataIndex: 'status', + key: 'status', + filterMultiple: true, + filters: PipelineStatusList.map(status => ({ text: status, value: status })), + onFilter: (value, record: Pipeline) => record.status === value, + sorter: (a: Pipeline, b: Pipeline) => a.status.localeCompare(b.status) + }, + { + title: 'Containers', + dataIndex: 
'containers', + key: 'containers', + sorter: (a: Pipeline, b: Pipeline) => a.containers - b.containers + }, + { + title: 'Datanodes', + dataIndex: 'datanodes', + key: 'datanodes', + render: (datanodes: SummaryDatanodeDetails[]) => ( +
    + {datanodes.map(datanode => ( +
    + triggerNode}> + {datanode?.hostName ?? 'N/A'} + +
    + ))} +
    + ) + }, + { + title: 'Leader', + dataIndex: 'leaderNode', + key: 'leaderNode', + sorter: (a: Pipeline, b: Pipeline) => a.leaderNode.localeCompare(b.leaderNode) + }, + { + title: () => ( + + Last Leader Election  + + + + + ), + dataIndex: 'lastLeaderElection', + key: 'lastLeaderElection', + render: (lastLeaderElection: number) => lastLeaderElection > 0 ? + getTimeDiffFromTimestamp(lastLeaderElection) : 'NA', + sorter: (a: Pipeline, b: Pipeline) => a.lastLeaderElection - b.lastLeaderElection + }, + { + title: 'Lifetime', + dataIndex: 'duration', + key: 'duration', + render: (duration: number) => getDurationFromTimestamp(duration), + sorter: (a: Pipeline, b: Pipeline) => a.duration - b.duration + }, + { + title: () => ( + + No. of Elections  + + + + + ), + dataIndex: 'leaderElections', + key: 'leaderElections', + render: (leaderElections: number) => leaderElections > 0 ? + leaderElections : 'NA', + sorter: (a: Pipeline, b: Pipeline) => a.leaderElections - b.leaderElections + } +]; + +const PipelinesTable: React.FC = ({ + loading = false, + data, + selectedColumns, + searchTerm = '' +}) => { + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} pipelines`, + showSizeChanger: true, + }; + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ) + } + + function getFilteredData(data: Pipeline[]) { + return data.filter( + (pipeline: Pipeline) => pipeline['pipelineId'].includes(searchTerm) + ) + } + + return ( +
    +
    + + ) +} + +export default PipelinesTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx new file mode 100644 index 00000000000..ecfbf730a2a --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import QuotaBar from '@/components/quotaBar/quotaBar'; +import { byteToSize } from '@/utils/common'; +import { Volume, VolumesTableProps } from '@/v2/types/volume.types'; +import Table, { ColumnsType, ColumnType, TablePaginationConfig } from 'antd/es/table'; +import moment from 'moment'; +import React from 'react'; +import { Link } from 'react-router-dom'; + +export const COLUMNS: ColumnsType = [ + { + title: 'Volume', + dataIndex: 'volume', + key: 'volume', + sorter: (a: Volume, b: Volume) => a.volume.localeCompare(b.volume), + defaultSortOrder: 'ascend' as const, + width: '15%' + }, + { + title: 'Owner', + dataIndex: 'owner', + key: 'owner', + sorter: (a: Volume, b: Volume) => a.owner.localeCompare(b.owner) + }, + { + title: 'Admin', + dataIndex: 'admin', + key: 'admin', + sorter: (a: Volume, b: Volume) => a.admin.localeCompare(b.admin) + }, + { + title: 'Creation Time', + dataIndex: 'creationTime', + key: 'creationTime', + sorter: (a: Volume, b: Volume) => a.creationTime - b.creationTime, + render: (creationTime: number) => { + return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Modification Time', + dataIndex: 'modificationTime', + key: 'modificationTime', + sorter: (a: Volume, b: Volume) => a.modificationTime - b.modificationTime, + render: (modificationTime: number) => { + return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Quota (Size)', + dataIndex: 'quotaInBytes', + key: 'quotaInBytes', + render: (quotaInBytes: number) => { + return quotaInBytes && quotaInBytes !== -1 ? 
byteToSize(quotaInBytes, 3) : 'NA'; + } + }, + { + title: 'Namespace Capacity', + key: 'namespaceCapacity', + sorter: (a: Volume, b: Volume) => a.usedNamespace - b.usedNamespace, + render: (text: string, record: Volume) => ( + + ) + }, +]; + +const VolumesTable: React.FC = ({ + loading = false, + data, + handleAclClick, + selectedColumns, + searchColumn = 'volume', + searchTerm = '' +}) => { + + React.useEffect(() => { + // On table mount add the actions column + const actionsColumn: ColumnType = { + title: 'Actions', + key: 'actions', + render: (_: any, record: Volume) => { + const searchParams = new URLSearchParams(); + searchParams.append('volume', record.volume); + + return ( + <> + + Show buckets + + handleAclClick(record)}> + Show ACL + + + ); + } + } + + if (COLUMNS.length > 0 && COLUMNS[COLUMNS.length - 1].key !== 'actions') { + // Push the ACL column for initial + COLUMNS.push(actionsColumn); + selectedColumns.push({ + label: actionsColumn.title as string, + value: actionsColumn.key as string + }); + } else { + // Replace old ACL column with new ACL column with correct reference + // e.g. After page is reloaded / redirect from other page + COLUMNS[COLUMNS.length - 1] = actionsColumn; + selectedColumns[selectedColumns.length - 1] = { + label: actionsColumn.title as string, + value: actionsColumn.key as string + } + } + + }, []); + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ) + } + + function getFilteredData(data: Volume[]) { + return data.filter( + (volume: Volume) => volume[searchColumn].includes(searchTerm) + ); + } + + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} volumes`, + showSizeChanger: true + }; + + return ( +
    +
    + + ) +} + +export default VolumesTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/breadcrumbs.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/breadcrumbs.constants.tsx new file mode 100644 index 00000000000..807a68cc8d2 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/breadcrumbs.constants.tsx @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +type BreadcrumbNameMap = { + [path: string]: string; +} + +export const breadcrumbNameMap: BreadcrumbNameMap = { + '/Overview': 'Overview', + '/Volumes': 'Volumes', + '/Buckets': 'Buckets', + '/Datanodes': 'Datanodes', + '/Pipelines': 'Pipelines', + '/Containers': 'Containers', + '/Insights': 'Insights', + '/DiskUsage': 'Disk Usage', + '/Heatmap': 'Heatmap', + '/Om': 'OM DB Insights' +}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/heatmap.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/heatmap.constants.tsx new file mode 100644 index 00000000000..63a8476648f --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/heatmap.constants.tsx @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +export const colourScheme = { + amberAlert: [ + '#FFCF88', + '#FFCA87', + '#FFC586', + '#FFC085', + '#FFBB83', + '#FFB682', + '#FFB181', + '#FFA676', + '#FF9F6F', + '#FF9869', + '#FF9262', + '#FF8B5B', + '#FF8455', + '#FF7D4E', + '#FF8282', + '#FF7776', + '#FF6D6A', + '#FF625F', + '#FF5753', + '#FF4D47', + '#FF423B' + ] +}; + +export const TIME_PERIODS: string[] = ['24H', '7D', '90D'] +export const ENTITY_TYPES: string[] = ['key', 'bucket', 'volume'] +export const ROOT_PATH = '/' diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/limit.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/limit.constants.tsx new file mode 100644 index 00000000000..b76c51c8960 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/limit.constants.tsx @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Option } from '@/v2/components/select/singleSelect'; + +export const LIMIT_OPTIONS: Option[] = [ + { + label: '1000', + value: '1000' + }, + { + label: '5000', + value: '5000' + }, + { + label: '10000', + value: '10000' + }, + { + label: '20000', + value: '20000' + } +]; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less new file mode 100644 index 00000000000..8f4c8ffaf9f --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less @@ -0,0 +1,41 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +.content-div { + min-height: unset; + + .table-header-section { + display: flex; + justify-content: space-between; + align-items: center; + + .table-filter-section { + font-size: 14px; + font-weight: normal; + display: flex; + column-gap: 8px; + padding: 16px 8px; + } + } + + .tag-block { + display: flex; + column-gap: 8px; + padding: 0px 8px 16px 8px; + } +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx new file mode 100644 index 00000000000..1c039f42709 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx @@ -0,0 +1,326 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React, { useEffect, useRef, useState } from 'react'; +import moment from 'moment'; +import { ValueType } from 'react-select'; +import { useLocation } from 'react-router-dom'; + +import AutoReloadPanel from '@/components/autoReloadPanel/autoReloadPanel'; +import AclPanel from '@/v2/components/aclDrawer/aclDrawer'; +import Search from '@/v2/components/search/search'; +import MultiSelect from '@/v2/components/select/multiSelect'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import BucketsTable, { COLUMNS } from '@/v2/components/tables/bucketsTable'; + +import { AutoReloadHelper } from '@/utils/autoReloadHelper'; +import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; +import { showDataFetchError } from '@/utils/common'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; + +import { + Bucket, + BucketResponse, + BucketsState, +} from '@/v2/types/bucket.types'; + +import './buckets.less'; + +const SearchableColumnOpts = [{ + label: 'Bucket', + value: 'name' +}, { + label: 'Volume', + value: 'volumeName' +}] + +const defaultColumns = COLUMNS.map(column => ({ + label: column.title as string, + value: column.key as string +})); + +function getVolumeBucketMap(data: Bucket[]) { + const volumeBucketMap = data.reduce(( + map: Map>, + currentBucket + ) => { + const volume = currentBucket.volumeName; + if (map.has(volume)) { + const buckets = Array.from(map.get(volume)!); + map.set(volume, new Set([...buckets, currentBucket])); + } else { + map.set(volume, new Set().add(currentBucket)); + } + return map; + }, new Map>()); + return volumeBucketMap; +} + +function getFilteredBuckets( + selectedVolumes: Option[], + bucketsMap: Map> +) { + let selectedBuckets: Bucket[] = []; + selectedVolumes.forEach(selectedVolume => { + if (bucketsMap.has(selectedVolume.value) + && bucketsMap.get(selectedVolume.value)) { + 
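+ // Merge this volume's buckets into the accumulated selection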
selectedBuckets = [ + ...selectedBuckets, + ...Array.from(bucketsMap.get(selectedVolume.value)!) + ]; + } + }); + + return selectedBuckets; +} + +const Buckets: React.FC<{}> = () => { + + const cancelSignal = useRef(); + + const [state, setState] = useState({ + totalCount: 0, + lastUpdated: 0, + columnOptions: defaultColumns, + volumeBucketMap: new Map>(), + bucketsUnderVolume: [], + volumeOptions: [], + }); + const [loading, setLoading] = useState(false); + const [selectedColumns, setSelectedColumns] = useState(defaultColumns); + const [selectedVolumes, setSelectedVolumes] = useState([]); + const [selectedLimit, setSelectedLimit] = useState+ + {plotResponse.fileCountResponse?.length > 0 + ? + : } + + + + + + {plotResponse.containerCountResponse?.length > 0 + ? + : } + + + + + } + + + ) + +} + +export default Insights; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/omInsights.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/omInsights.tsx new file mode 100644 index 00000000000..732af0aa00e --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/omInsights.tsx @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
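The getVolumeBucketMap and getFilteredBuckets helpers above group buckets by volume and then flatten the sets of the selected volumes back into table rows. A standalone sketch of that grouping, assuming a reduced Bucket shape with only the fields used here:

```typescript
// Reduced Bucket shape; the real type carries many more fields (see bucket.types.ts).
type Bucket = { volumeName: string; name: string };

// Group buckets by volume, as getVolumeBucketMap does with a Map of Sets.
function groupByVolume(buckets: Bucket[]): Map<string, Set<Bucket>> {
  const map = new Map<string, Set<Bucket>>();
  for (const bucket of buckets) {
    const existing = map.get(bucket.volumeName) ?? new Set<Bucket>();
    existing.add(bucket);
    map.set(bucket.volumeName, existing);
  }
  return map;
}

// Flatten the buckets of every selected volume into a single row array,
// as getFilteredBuckets does for the volume multi-select.
function filterByVolumes(selectedVolumes: string[], grouped: Map<string, Set<Bucket>>): Bucket[] {
  return selectedVolumes.flatMap(volume => {
    const buckets = grouped.get(volume);
    return buckets ? Array.from(buckets) : [];
  });
}

const grouped = groupByVolume([
  { volumeName: 'vol1', name: 'bucket1' },
  { volumeName: 'vol1', name: 'bucket2' },
  { volumeName: 'vol2', name: 'bucket3' }
]);
console.log(filterByVolumes(['vol1'], grouped).length); // 2
```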
+ */ + +import React from 'react'; +import { AxiosError } from 'axios'; +import { ValueType } from 'react-select'; +import { Tabs, Tooltip } from 'antd'; +import { TablePaginationConfig } from 'antd/es/table'; +import { InfoCircleOutlined } from '@ant-design/icons'; + +import { Option } from '@/v2/components/select/singleSelect'; +import ContainerMismatchTable from '@/v2/components/tables/insights/containerMismatchTable'; +import DeletedContainerKeysTable from '@/v2/components/tables/insights/deletedContainerKeysTable'; +import DeletePendingDirTable from '@/v2/components/tables/insights/deletePendingDirsTable'; +import DeletePendingKeysTable from '@/v2/components/tables/insights/deletePendingKeysTable'; +import ExpandedKeyTable from '@/v2/components/tables/insights/expandedKeyTable'; +import OpenKeysTable from '@/v2/components/tables/insights/openKeysTable'; +import { showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; + +import { + Container, + ExpandedRow, + ExpandedRowState, + MismatchKeysResponse +} from '@/v2/types/insights.types'; + +import './insights.less'; +import { useLocation } from 'react-router-dom'; + + +const OMDBInsights: React.FC<{}> = () => { + + const [loading, setLoading] = React.useState(false); + const [expandedRowData, setExpandedRowData] = React.useState({}); + const [selectedLimit, setSelectedLimit] = React.useState = () => { ) } ]} - linkToUrl='/Om' /> + linkToUrl='/Om' + state={{activeTab: '3'}} /> + + OM ID:  + {omServiceId} + + | + + SCM ID:  + {scmServiceId} + ); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.less new file mode 100644 index 00000000000..a2fb93f7dad --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.less @@ -0,0 +1,48 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +.content-div { + min-height: unset; + + .table-header-section { + display: flex; + justify-content: space-between; + align-items: center; + + .table-filter-section { + font-size: 14px; + font-weight: normal; + display: flex; + column-gap: 8px; + padding: 16px 8px; + } + } + + .uuid-tooltip { + cursor: pointer; + .ant-tooltip-inner { + width: max-content; + } + } + + .tag-block { + display: flex; + column-gap: 8px; + padding: 0px 8px 16px 8px; + } +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx new file mode 100644 index 00000000000..f6ff87c7e13 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React, { + useEffect, + useRef, + useState +} from 'react'; +import moment from 'moment'; +import { ValueType } from 'react-select'; + +import AutoReloadPanel from '@/components/autoReloadPanel/autoReloadPanel'; +import Search from '@/v2/components/search/search'; +import MultiSelect, { Option } from '@/v2/components/select/multiSelect'; +import PipelinesTable, { COLUMNS } from '@/v2/components/tables/pipelinesTable'; +import { showDataFetchError } from '@/utils/common'; +import { AutoReloadHelper } from '@/utils/autoReloadHelper'; +import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; + +import { + Pipeline, + PipelinesResponse, + PipelinesState +} from '@/v2/types/pipelines.types'; + +import './pipelines.less'; + + +const defaultColumns = COLUMNS.map(column => ({ + label: (typeof column.title === 'string') + ? 
column.title + : (column.title as Function)().props.children[0], + value: column.key as string, +})); + +const Pipelines: React.FC<{}> = () => { + const cancelSignal = useRef(); + + const [state, setState] = useState({ + activeDataSource: [], + columnOptions: defaultColumns, + lastUpdated: 0, + }); + const [loading, setLoading] = useState(false); + const [selectedColumns, setSelectedColumns] = useState(defaultColumns); + const [searchTerm, setSearchTerm] = useState(''); + + const debouncedSearch = useDebounce(searchTerm, 300); + + const loadData = () => { + setLoading(true); + //Cancel any previous requests + cancelRequests([cancelSignal.current!]); + + const { request, controller } = AxiosGetHelper( + '/api/v1/pipelines', + cancelSignal.current + ); + + cancelSignal.current = controller; + request.then(response => { + const pipelinesResponse: PipelinesResponse = response.data; + const pipelines: Pipeline[] = pipelinesResponse?.pipelines ?? {}; + setState({ + ...state, + activeDataSource: pipelines, + lastUpdated: Number(moment()) + }) + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError(error.toString()); + }) + } + + const autoReloadHelper: AutoReloadHelper = new AutoReloadHelper(loadData); + + useEffect(() => { + autoReloadHelper.startPolling(); + loadData(); + return (() => { + autoReloadHelper.stopPolling(); + cancelRequests([cancelSignal.current!]); + }) + }, []); + + function handleColumnChange(selected: ValueType) { + setSelectedColumns(selected as Option[]); + } + + const { + activeDataSource, + columnOptions, + lastUpdated + } = state; + + return ( + <> +
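The pipelines page derives its column-picker options from the antd column definitions, taking the label either from a plain string title or, when the title is a render function, from the first child of the rendered node. A simplified sketch of that mapping; the ColumnDef type below is a stand-in for the antd ColumnsType entries:

```typescript
// Stand-in for the antd column definition: the title is either a string or a
// render function returning an element whose first child is the label text.
type ColumnDef = {
  key: string;
  title: string | (() => { props: { children: [string, ...unknown[]] } });
};

type Option = { label: string; value: string };

function toColumnOptions(columns: ColumnDef[]): Option[] {
  return columns.map(column => ({
    label: typeof column.title === 'string'
      ? column.title
      : column.title().props.children[0],
    value: column.key
  }));
}
```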
    + Pipelines + +
    +
    +
    +
    +
    + { }} + fixedColumn='pipelineId' + columnLength={COLUMNS.length} /> +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    + +
    +
    + + ); +} +export default Pipelines; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx index a5918ac6ce6..b4614d387f3 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx @@ -16,26 +16,21 @@ * limitations under the License. */ -import React, { useEffect, useState } from 'react'; +import React, { useEffect, useRef, useState } from 'react'; import moment from 'moment'; -import { Table } from 'antd'; -import { Link } from 'react-router-dom'; -import { - TablePaginationConfig, - ColumnsType -} from 'antd/es/table'; import { ValueType } from 'react-select/src/types'; -import QuotaBar from '@/components/quotaBar/quotaBar'; import AclPanel from '@/v2/components/aclDrawer/aclDrawer'; import AutoReloadPanel from '@/components/autoReloadPanel/autoReloadPanel'; -import MultiSelect, { Option } from '@/v2/components/select/multiSelect'; import SingleSelect from '@/v2/components/select/singleSelect'; +import MultiSelect, { Option } from '@/v2/components/select/multiSelect'; +import VolumesTable, { COLUMNS } from '@/v2/components/tables/volumesTable'; import Search from '@/v2/components/search/search'; -import { byteToSize, showDataFetchError } from '@/utils/common'; +import { showDataFetchError } from '@/utils/common'; import { AutoReloadHelper } from '@/utils/autoReloadHelper'; -import { AxiosGetHelper } from "@/utils/axiosRequestHelper"; +import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; import { useDebounce } from '@/v2/hooks/debounce.hook'; import { @@ -61,103 +56,9 @@ const SearchableColumnOpts = [ } ] -const LIMIT_OPTIONS: Option[] = [ - { label: '1000', value: '1000' }, - { label: '5000', value: "5000" }, - { label: '10000', value: "10000" }, - { label: '20000', value: "20000" } -] - const Volumes: React.FC<{}> = () => { - let cancelSignal: AbortController; - - const COLUMNS: ColumnsType = [ - { - title: 'Volume', - dataIndex: 'volume', - key: 'volume', - sorter: (a: Volume, b: Volume) => a.volume.localeCompare(b.volume), - defaultSortOrder: 'ascend' as const, - width: '15%' - }, - { - title: 'Owner', - dataIndex: 'owner', - key: 'owner', - sorter: (a: Volume, b: Volume) => a.owner.localeCompare(b.owner) - }, - { - title: 'Admin', - dataIndex: 'admin', - key: 'admin', - sorter: (a: Volume, b: Volume) => a.admin.localeCompare(b.admin) - }, - { - title: 'Creation Time', - dataIndex: 'creationTime', - key: 'creationTime', - sorter: (a: Volume, b: Volume) => a.creationTime - b.creationTime, - render: (creationTime: number) => { - return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Modification Time', - dataIndex: 'modificationTime', - key: 'modificationTime', - sorter: (a: Volume, b: Volume) => a.modificationTime - b.modificationTime, - render: (modificationTime: number) => { - return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; - } - }, - { - title: 'Quota (Size)', - dataIndex: 'quotaInBytes', - key: 'quotaInBytes', - render: (quotaInBytes: number) => { - return quotaInBytes && quotaInBytes !== -1 ? 
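Both the pipelines and volumes loaders above follow the same pattern: abort any in-flight request, start a new one, remember its controller, and abort again when the component unmounts. A minimal sketch of that pattern using the Fetch API directly instead of the project's AxiosGetHelper wrapper; the error handling is illustrative:

```typescript
// Holds the controller of the request currently in flight, if any.
let inFlight: AbortController | undefined;

async function loadPipelines(): Promise<unknown> {
  // Cancel the previous request before issuing a new one.
  inFlight?.abort();
  inFlight = new AbortController();

  const response = await fetch('/api/v1/pipelines', { signal: inFlight.signal });
  if (!response.ok) {
    throw new Error(`Request failed with status ${response.status}`);
  }
  return response.json();
}

// Called from the effect cleanup: stop polling and abort the outstanding request.
function cleanup(): void {
  inFlight?.abort();
}
```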
byteToSize(quotaInBytes, 3) : 'NA'; - } - }, - { - title: 'Namespace Capacity', - key: 'namespaceCapacity', - sorter: (a: Volume, b: Volume) => a.usedNamespace - b.usedNamespace, - render: (text: string, record: Volume) => ( - - ) - }, - { - title: 'Actions', - key: 'actions', - render: (_: any, record: Volume) => { - const searchParams = new URLSearchParams(); - searchParams.append('volume', record.volume); - - return ( - <> - - Show buckets - - handleAclLinkClick(record)}> - Show ACL - - - ); - } - } - ]; + const cancelSignal = useRef(); const defaultColumns = COLUMNS.map(column => ({ label: column.title as string, @@ -167,10 +68,10 @@ const Volumes: React.FC<{}> = () => { const [state, setState] = useState({ data: [], lastUpdated: 0, - columnOptions: defaultColumns, - currentRow: {} + columnOptions: defaultColumns }); const [loading, setLoading] = useState(false); + const [currentRow, setCurrentRow] = useState>({}); const [selectedColumns, setSelectedColumns] = useState(defaultColumns); const [selectedLimit, setSelectedLimit] = useState
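Several of these pages debounce the search box with useDebounce(searchTerm, 300); the hook itself is not part of this patch. A common implementation looks like the sketch below, which is an assumption rather than the project's actual code:

```typescript
import { useEffect, useState } from 'react';

// Returns a value that only updates after it has been stable for delayMs.
// Hypothetical implementation; the project's useDebounce hook is not shown in this diff.
export function useDebouncedValue<T>(value: T, delayMs: number): T {
  const [debounced, setDebounced] = useState(value);

  useEffect(() => {
    const timer = setTimeout(() => setDebounced(value), delayMs);
    // Restart the timer whenever the value changes before the delay elapses.
    return () => clearTimeout(timer);
  }, [value, delayMs]);

  return debounced;
}
```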
    - + import('@/v2/pages/overview/overview')); const Volumes = lazy(() => import('@/v2/pages/volumes/volumes')) +const Buckets = lazy(() => import('@/v2/pages/buckets/buckets')); +const Datanodes = lazy(() => import('@/v2/pages/datanodes/datanodes')); +const Pipelines = lazy(() => import('@/v2/pages/pipelines/pipelines')); +const DiskUsage = lazy(() => import('@/v2/pages/diskUsage/diskUsage')); +const Containers = lazy(() => import('@/v2/pages/containers/containers')); +const Insights = lazy(() => import('@/v2/pages/insights/insights')); +const OMDBInsights = lazy(() => import('@/v2/pages/insights/omInsights')); +const Heatmap = lazy(() => import('@/v2/pages/heatmap/heatmap')); + export const routesV2 = [ { @@ -28,5 +37,37 @@ export const routesV2 = [ { path: '/Volumes', component: Volumes + }, + { + path: '/Buckets', + component: Buckets + }, + { + path: '/Datanodes', + component: Datanodes + }, + { + path: '/Pipelines', + component: Pipelines + }, + { + path: '/DiskUsage', + component: DiskUsage + }, + { + path: '/Containers', + component: Containers + }, + { + path: '/Insights', + component: Insights + }, + { + path: '/Om', + component: OMDBInsights + }, + { + path: '/Heatmap', + component: Heatmap } ]; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts index 8b2fd0c694c..eb499dc617e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/bucket.types.ts @@ -17,6 +17,7 @@ */ import { Acl } from "@/v2/types/acl.types"; +import { Option as MultiOption } from "@/v2/components/select/multiSelect"; // Corresponds to OzoneManagerProtocolProtos.StorageTypeProto export const BucketStorageTypeList = [ @@ -38,8 +39,8 @@ export type BucketLayout = typeof BucketLayoutTypeList[number]; export type Bucket = { volumeName: string; - bucketName: string; - isVersionEnabled: boolean; + name: string; + versioning: boolean; storageType: BucketStorage; creationTime: number; modificationTime: number; @@ -53,3 +54,26 @@ export type Bucket = { acls?: Acl[]; bucketLayout: BucketLayout; } + +export type BucketResponse = { + totalCount: number; + buckets: Bucket[]; +} + +export type BucketsState = { + totalCount: number; + lastUpdated: number; + columnOptions: MultiOption[]; + volumeBucketMap: Map>; + bucketsUnderVolume: Bucket[]; + volumeOptions: MultiOption[]; +} + +export type BucketsTableProps = { + loading: boolean; + data: Bucket[]; + handleAclClick: (arg0: Bucket) => void; + selectedColumns: MultiOption[]; + searchColumn: 'name' | 'volumeName'; + searchTerm: string; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/container.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/container.types.ts new file mode 100644 index 00000000000..2467a0f26fd --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/container.types.ts @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
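The v2 route table above pairs each path with a lazily loaded page component. How the table is rendered is outside this patch; the sketch below assumes a react-router-dom v5 style Switch wrapped in Suspense and a hypothetical page module with a default export:

```tsx
import React, { lazy, Suspense } from 'react';
import { Route, Switch } from 'react-router-dom';

// Hypothetical page module that default-exports a React component.
const Buckets = lazy(() => import('./pages/buckets'));

const routes = [{ path: '/Buckets', component: Buckets }];

const AppRoutes: React.FC = () => (
  <Suspense fallback={<span>Loading...</span>}>
    <Switch>
      {routes.map(({ path, component }) => (
        <Route key={path} path={path} component={component} />
      ))}
    </Switch>
  </Suspense>
);

export default AppRoutes;
```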
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { Option } from "@/v2/components/select/multiSelect"; + +export type ContainerReplica = { + containerId: number; + datanodeUuid: string; + datanodeHost: string; + firstSeenTime: number; + lastSeenTime: number; + lastBcsId: number; +} + +export type Container = { + containerID: number; + containerState: string; + unhealthySince: number; + expectedReplicaCount: number; + actualReplicaCount: number; + replicaDeltaCount: number; + reason: string; + keys: number; + pipelineID: string; + replicas: ContainerReplica[]; +} + +type KeyResponseBlock = { + containerID: number; + localID: number; +} + +export type KeyResponse = { + Volume: string; + Bucket: string; + Key: string; + DataSize: number; + CompletePath: string; + Versions: number[]; + Blocks: Record; + CreationTime: string; + ModificationTime: string; +} + +export type ContainerKeysResponse = { + totalCount: number; + keys: KeyResponse[]; +} + +export type ContainerTableProps = { + loading: boolean; + data: Container[]; + searchColumn: 'containerID' | 'pipelineID'; + searchTerm: string; + selectedColumns: Option[]; + expandedRow: ExpandedRow; + expandedRowSetter: (arg0: ExpandedRow) => void; +} + + +export type ExpandedRow = { + [key: number]: ExpandedRowState; +} + +export type ExpandedRowState = { + loading: boolean; + containerId: number; + dataSource: KeyResponse[]; + totalCount: number; +} + +export type ContainerState = { + lastUpdated: number; + columnOptions: Option[]; + missingContainerData: Container[]; + underReplicatedContainerData: Container[]; + overReplicatedContainerData: Container[]; + misReplicatedContainerData: Container[]; +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts new file mode 100644 index 00000000000..96a37020153 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
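The ExpandedRow and ExpandedRowState types above model the per-container expanded rows as a map keyed by container id. A short sketch of updating that map immutably when a container's keys arrive, using reduced forms of the types defined in container.types.ts:

```typescript
// Reduced form of KeyResponse with just the fields needed for the sketch.
type KeyRow = { Key: string; DataSize: number };

type ExpandedRowState = {
  loading: boolean;
  containerId: number;
  dataSource: KeyRow[];
  totalCount: number;
};

type ExpandedRow = { [containerId: number]: ExpandedRowState };

// Replace (or add) the entry for one container without mutating the existing map.
function setExpandedRow(
  current: ExpandedRow,
  containerId: number,
  dataSource: KeyRow[],
  totalCount: number
): ExpandedRow {
  return {
    ...current,
    [containerId]: { loading: false, containerId, dataSource, totalCount }
  };
}
```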
+ */ + +import { Pipeline } from "@/v2/types/pipelines.types"; +import { StorageReport } from "@/v2/types/overview.types"; +import { Option as MultiOption } from "@/v2/components/select/multiSelect"; + +// Corresponds to HddsProtos.NodeState +export const DatanodeStateList = ['HEALTHY', 'STALE', 'DEAD'] as const; +type DatanodeStateType = typeof DatanodeStateList; +export type DatanodeState = DatanodeStateType[number]; + +// Corresponds to HddsProtos.NodeOperationalState +export const DatanodeOpStateList = [ + 'IN_SERVICE', + 'DECOMMISSIONING', + 'DECOMMISSIONED', + 'ENTERING_MAINTENANCE', + 'IN_MAINTENANCE' +] as const; +export type DatanodeOpState = typeof DatanodeOpStateList[number]; + +export type DatanodeResponse = { + hostname: string; + state: DatanodeState; + opState: DatanodeOpState; + lastHeartbeat: string; + storageReport: StorageReport; + pipelines: Pipeline[]; + containers: number; + openContainers: number; + leaderCount: number; + uuid: string; + version: string; + setupTime: number; + revision: string; + buildDate: string; + networkLocation: string; +} + +export type DatanodesResponse = { + totalCount: number; + datanodes: DatanodeResponse[]; +} + +export type Datanode = { + hostname: string; + state: DatanodeState; + opState: DatanodeOpState; + lastHeartbeat: string; + storageUsed: number; + storageTotal: number; + storageRemaining: number; + storageCommitted: number; + pipelines: Pipeline[]; + containers: number; + openContainers: number; + leaderCount: number; + uuid: string; + version: string; + setupTime: number; + revision: string; + buildDate: string; + networkLocation: string; +} + +export type DatanodeDetails = { + uuid: string; +} + +export type DatanodeDecomissionInfo = { + datanodeDetails: DatanodeDetails +} + +export type DatanodesState = { + dataSource: Datanode[]; + lastUpdated: number; + columnOptions: MultiOption[]; +} + +// Datanode Summary endpoint types +type summaryByteString = { + string: string; + bytes: { + validUtf8: boolean; + empty: boolean; + } +} + +type SummaryPort = { + name: string; + value: number; +} + +type SummaryDatanodeDetails = { + level: number; + parent: unknown | null; + cost: number; + uuid: string; + uuidString: string; + ipAddress: string; + hostName: string; + ports: SummaryPort; + certSerialId: null, + version: string | null; + setupTime: number; + revision: string | null; + buildDate: string; + persistedOpState: string; + persistedOpStateExpiryEpochSec: number; + initialVersion: number; + currentVersion: number; + decommissioned: boolean; + maintenance: boolean; + ipAddressAsByteString: summaryByteString; + hostNameAsByteString: summaryByteString; + networkName: string; + networkLocation: string; + networkFullPath: string; + numOfLeaves: number; + networkNameAsByteString: summaryByteString; + networkLocationAsByteString: summaryByteString +} + +type SummaryMetrics = { + decommissionStartTime: string; + numOfUnclosedPipelines: number; + numOfUnderReplicatedContainers: number; + numOfUnclosedContainers: number; +} + +type SummaryContainers = { + UnderReplicated: string[]; + UnClosed: string[]; +} + +export type SummaryData = { + datanodeDetails: SummaryDatanodeDetails; + metrics: SummaryMetrics; + containers: SummaryContainers; +} + +export type DatanodeTableProps = { + loading: boolean; + selectedRows: React.Key[]; + data: Datanode[]; + decommissionUuids: string | string[]; + searchColumn: 'hostname' | 'uuid' | 'version' | 'revision'; + searchTerm: string; + selectedColumns: MultiOption[]; + handleSelectionChange: (arg0: 
React.Key[]) => void; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/diskUsage.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/diskUsage.types.ts new file mode 100644 index 00000000000..e649c143aec --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/diskUsage.types.ts @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export type DUSubpath = { + path: string; + size: number; + sizeWithReplica: number; + isKey: boolean; +} + +export type DUResponse = { + status: string; + path: string; + subPathCount: number; + size: number; + sizeWithReplica: number; + subPaths: DUSubpath[]; + sizeDirectKey: number; +} + +export type PlotData = { + value: number; + name: string; + size: string; + percentage: string; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/heatmap.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/heatmap.types.ts new file mode 100644 index 00000000000..a76db22a6fe --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/heatmap.types.ts @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
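DatanodeResponse above keeps the storage figures nested in a StorageReport, while the Datanode row type flattens them into storageUsed, storageTotal, storageRemaining and storageCommitted. A sketch of that mapping; the StorageReport field names are assumptions, since that type lives in overview.types and is not part of this hunk:

```typescript
// Assumed StorageReport shape (defined in overview.types, not shown in this diff).
type StorageReport = { capacity: number; used: number; remaining: number; committed: number };

type DatanodeResponseLite = { hostname: string; uuid: string; storageReport: StorageReport };

type DatanodeRowLite = {
  hostname: string;
  uuid: string;
  storageUsed: number;
  storageTotal: number;
  storageRemaining: number;
  storageCommitted: number;
};

// Flatten the nested storage report into the table row shape.
function toDatanodeRow(dn: DatanodeResponseLite): DatanodeRowLite {
  const { capacity, used, remaining, committed } = dn.storageReport;
  return {
    hostname: dn.hostname,
    uuid: dn.uuid,
    storageUsed: used,
    storageTotal: capacity,
    storageRemaining: remaining,
    storageCommitted: committed
  };
}
```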
+ */ + +export type InputPathValidTypes = 'error' | 'success' | 'warning' | 'validating' | undefined; + +export type HeatmapChild = { + label: string; + size: number; + accessCount: number; + color: number; +} + +export type InputPathState = { + inputPath: string; + isInputPathValid: InputPathValidTypes; + helpMessage: string; +} + +export type HeatmapResponse = { + label: string; + path: string; + maxAccessCount: number; + minAccessCount: number; + size: number; + children: HeatmapChild[]; +} + +export type HeatmapState = { + heatmapResponse: HeatmapResponse; + entityType: string; + date: string | number; +} + +export interface IResponseError extends Error { + status?: number; +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/insights.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/insights.types.ts new file mode 100644 index 00000000000..d608a2bc8d1 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/insights.types.ts @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { Option } from "@/v2/components/select/multiSelect"; + +export type FileCountResponse = { + volume: string; + bucket: string; + fileSize: number; + count: number; +} + +export type ContainerCountResponse = { + containerSize: number; + count: number; +} + +export type PlotResponse = { + fileCountResponse: FileCountResponse[], + containerCountResponse: ContainerCountResponse[] +} + +export type FilePlotData = { + fileCountValues: string[]; + fileCountMap: Map; +} + +export type ContainerPlotData = { + containerCountValues: string[]; + containerCountMap: Map; +} + +export type InsightsState = { + volumeBucketMap: Map>; + volumeOptions: Option[]; + fileCountError: string | undefined; + containerSizeError: string | undefined; +} + +//-------------------------// +//---OM DB Insights types--- +//-------------------------// +type ReplicationConfig = { + replicationFactor: string; + requiredNodes: number; + replicationType: string; +} + +export type Pipelines = { + id: { + id: string; + }, + replicationConfig: ReplicationConfig; + healthy: boolean; +} + +// Container Mismatch Info +export type Container = { + containerId: number; + numberOfKeys: number; + pipelines: Pipelines[]; + existsAt: 'OM' | 'SCM'; +} + +export type MismatchContainersResponse = { + containerDiscrepancyInfo: Container[]; +} + +// Deleted Container Keys +export type DeletedContainerKeysResponse = { + containers: Container[]; +} + +export type MismatchKeys = { + Volume: string; + Bucket: string; + Key: string; + DataSize: number; + Versions: number[]; + Blocks: Record + CreationTime: string; + ModificationTime: string; +} + +export type MismatchKeysResponse = { + totalCount: number; + keys: MismatchKeys[]; +} + +// Open Keys +export type OpenKeys = { + key: string; + path: string; + inStateSince: number; + size: number; + replicatedSize: number; + replicationInfo: { + data: number; + parity: number; + ecChunkSize: number; + codec: string; + replicationType: string; + requiredNodes: number; + } + creationTime: number; + modificationTime: number; + isKey: boolean; +} + +export type OpenKeysResponse = { + lastKey: string; + replicatedDataSize: number; + unreplicatedDataSize: number; + fso?: OpenKeys[]; + nonFSO?: OpenKeys[]; +} + +//Keys pending deletion +export type DeletePendingKey = { + objectID: number; + updateID: number; + parentObjectID: number; + volumeName: string; + bucketName: string; + keyName: string; + dataSize: number; + creationTime: number; + modificationTime: number; + replicationConfig: ReplicationConfig; + fileChecksum: number | null; + fileName: string; + file: boolean; + path: string; + hsync: boolean; + replicatedSize: number; + fileEncryptionInfo: string | null; + objectInfo: string; + updateIDSet: boolean; +} + +export type DeletePendingKeysResponse = { + lastKey: string; + keysSummary: { + totalUnreplicatedDataSize: number, + totalReplicatedDataSize: number, + totalDeletedKeys: number + }, + replicatedDataSize: number; + unreplicatedDataSize: number; + deletedKeyInfo: { + omKeyInfoList: DeletePendingKey[] + }[]; +} + +//Directories Pending for Deletion +export type DeletedDirInfo = { + key: string; + path: string; + inStateSince: number; + size: number; + replicatedSize: number; + replicationInfo: ReplicationConfig; + creationTime: number; + modificationTime: number; + isKey: boolean; +} + +export type DeletedDirReponse = { + lastKey: string; + replicatedDataSize: number; + unreplicatedDataSize: number; + deletedDirInfo: DeletedDirInfo[]; + status: string; +} + +export type ExpandedRow = { 
+ [key: number]: ExpandedRowState; +} + +export type ExpandedRowState = { + containerId: number; + dataSource: MismatchKeys[]; + totalCount: number; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts new file mode 100644 index 00000000000..7c5a23bc0af --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/pipelines.types.ts @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { Option } from "@/v2/components/select/multiSelect"; + +export const PipelineStatusList = [ + 'OPEN', + 'CLOSING', + 'QUASI_CLOSED', + 'CLOSED', + 'UNHEALTHY', + 'INVALID', + 'DELETED', + 'DORMANT' +] as const; +export type PipelineStatus = typeof PipelineStatusList[number]; + +export type Pipeline = { + pipelineId: string; + status: PipelineStatus; + replicationType: string; + leaderNode: string; + datanodes: string[]; + lastLeaderElection: number; + duration: number; + leaderElections: number; + replicationFactor: string; + containers: number; +} + +export type PipelinesResponse = { + totalCount: number; + pipelines: Pipeline[]; +} + +export type PipelinesState = { + activeDataSource: Pipeline[]; + columnOptions: Option[]; + lastUpdated: number; +} + +export type PipelinesTableProps = { + loading: boolean; + data: Pipeline[]; + selectedColumns: Option[]; + searchTerm: string; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts index 67f007706a4..b808d403584 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/volume.types.ts @@ -40,5 +40,13 @@ export type VolumesState = { data: Volume[]; lastUpdated: number; columnOptions: Option[]; - currentRow: Volume | Record; +} + +export type VolumesTableProps = { + loading: boolean; + data: Volume[]; + handleAclClick: (arg0: Volume) => void; + selectedColumns: Option[]; + searchColumn: 'volume' | 'owner' | 'admin'; + searchTerm: string; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts new file mode 100644 index 00000000000..daaae2d54d3 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * 
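DeletePendingKeysResponse above groups the versions of each pending-delete key under deletedKeyInfo[].omKeyInfoList, and the OM insights page sums dataSize across those groups. A reduced sketch of that aggregation; the field names follow the type, while the sample values are made up:

```typescript
// Reduced form of DeletePendingKey with just the fields needed for the sum.
type DeletePendingKeyLite = { keyName: string; dataSize: number };

type DeletePendingKeysResponseLite = {
  deletedKeyInfo: { omKeyInfoList: DeletePendingKeyLite[] }[];
};

// Total pending-delete size: flatten every omKeyInfoList and sum dataSize.
function totalPendingDeleteSize(response: DeletePendingKeysResponseLite): number {
  return response.deletedKeyInfo
    .flatMap(info => info.omKeyInfoList)
    .reduce((sum, key) => sum + key.dataSize, 0);
}

console.log(totalPendingDeleteSize({
  deletedKeyInfo: [
    { omKeyInfoList: [{ keyName: 'a', dataSize: 10 }, { keyName: 'a', dataSize: 20 }] },
    { omKeyInfoList: [{ keyName: 'b', dataSize: 5 }] }
  ]
})); // 35
```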
or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import moment from "moment"; + +moment.updateLocale('en', { + relativeTime: { + past: '%s ago', + s: '%ds', + m: '1min', + mm: '%dmins', + h: '1hr', + hh: '%dhrs', + d: '1d', + dd: '%dd', + M: '1m', + MM: '%dm', + y: '1y', + yy: '%dy' + } +}); + +export function getTimeDiffFromTimestamp(timestamp: number): string { + const timestampDate = new Date(timestamp); + return moment(timestampDate).fromNow(); +} + +export function getDurationFromTimestamp(timestamp: number): string { + const duration: moment.Duration = moment.duration(timestamp, 'milliseconds'); + // return nothing when the duration is falsy or not correctly parsed (P0D) + if(!duration || duration.toISOString() === "P0D") return ''; + + let elapsedTime = []; + const durationBreakdowns: Record = { + 'y': Math.floor(duration.years()), + 'm': Math.floor(duration.months()), + 'd': Math.floor(duration.days()), + 'h': Math.floor(duration.hours()), + 'min': Math.floor(duration.minutes()), + 's': Math.floor(duration.seconds()) + } + + for (const [key, value] of Object.entries(durationBreakdowns)) { + value > 0 && elapsedTime.push(value + key); + } + + return (elapsedTime.length === 0) ? 'Just now' : elapsedTime.join(' '); +} + +export function getFormattedTime(time: number | string, format: string) { + if (typeof time === 'string') return moment(time).format(format); + return (time > 0) ? 
moment(time).format(format) : 'N/A'; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index d7fdf2b9eb8..4a3c11c11b0 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -66,7 +66,6 @@ interface IDatanodeResponse { version: string; setupTime: number; revision: string; - buildDate: string; networkLocation: string; } @@ -92,7 +91,6 @@ interface IDatanode { version: string; setupTime: number; revision: string; - buildDate: string; networkLocation: string; } @@ -331,15 +329,6 @@ const COLUMNS = [ sorter: (a: IDatanode, b: IDatanode) => a.revision.localeCompare(b.revision), defaultSortOrder: 'ascend' as const }, - { - title: 'Build Date', - dataIndex: 'buildDate', - key: 'buildDate', - isVisible: true, - isSearchable: true, - sorter: (a: IDatanode, b: IDatanode) => a.buildDate.localeCompare(b.buildDate), - defaultSortOrder: 'ascend' as const - }, { title: 'Network Location', dataIndex: 'networkLocation', @@ -446,7 +435,6 @@ export class Datanodes extends React.Component, IDatanode version: datanode.version, setupTime: datanode.setupTime, revision: datanode.revision, - buildDate: datanode.buildDate, networkLocation: datanode.networkLocation }; }); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx index f273f758ea9..63f095ff7ca 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx @@ -101,9 +101,7 @@ export class Insights extends React.Component, IInsightsS // Disable bucket selection dropdown if more than one volume is selected // If there is only one volume, bucket selection dropdown should not be disabled. const isBucketSelectionDisabled = !selectedVolumes || - (selectedVolumes && - (selectedVolumes.length > 1 && - (volumeBucketMap.size !== 1))); + (selectedVolumes?.length > 1 && volumeBucketMap.size !== 1); let bucketOptions: IOption[] = []; // When volume is changed and more than one volume is selected, // selected buckets value should be reset to all buckets @@ -455,7 +453,7 @@ export class Insights extends React.Component, IInsightsS
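getDurationFromTimestamp above breaks a millisecond duration into its largest non-zero units and joins them into a short label. A simplified standalone variant that stops at days (the original also handles months and years) shows the intended output:

```typescript
import moment from 'moment';

// Simplified variant of getDurationFromTimestamp: days and below only.
function formatDuration(ms: number): string {
  const duration = moment.duration(ms, 'milliseconds');
  const parts: Record<string, number> = {
    d: Math.floor(duration.days()),
    h: Math.floor(duration.hours()),
    min: Math.floor(duration.minutes()),
    s: Math.floor(duration.seconds())
  };
  const elapsed = Object.entries(parts)
    .filter(([, value]) => value > 0)
    .map(([unit, value]) => `${value}${unit}`);
  return elapsed.length === 0 ? 'Just now' : elapsed.join(' ');
}

console.log(formatDuration(90061000)); // "1d 1h 1min 1s"
```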
    {isLoading ? Loading... : - ((fileCountsResponse && fileCountsResponse.length > 0) ? + ((fileCountsResponse?.length > 0) ?
    @@ -506,7 +504,7 @@ export class Insights extends React.Component, IInsightsS
    {isLoading ? Loading... : - ((containerCountResponse && containerCountResponse.length > 0) ? + ((containerCountResponse?.length > 0) ?
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx index b56e8d8151a..f092708348b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx @@ -281,8 +281,9 @@ const DELETED_TAB_COLUMNS = [ const PENDINGDIR_TAB_COLUMNS = [ { title: 'Directory Name', - dataIndex: 'path', - key: 'path' + dataIndex: 'key', + isSearchable: true, + key: 'key' }, { title: 'In state since', @@ -294,9 +295,8 @@ const PENDINGDIR_TAB_COLUMNS = [ }, { title: 'Path', - dataIndex: 'key', - key: 'key', - isSearchable: true, + dataIndex: 'path', + key: 'path', width: '450px' }, { @@ -530,7 +530,7 @@ export class Om extends React.Component, IOmdbInsightsSta const { request, controller } = AxiosGetHelper(mismatchEndpoint, cancelMismatchedEndpointSignal) cancelMismatchedEndpointSignal = controller; request.then(mismatchContainersResponse => { - const mismatchContainers: IContainerResponse[] = mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.containerDiscrepancyInfo; + const mismatchContainers: IContainerResponse[] = mismatchContainersResponse?.data?.containerDiscrepancyInfo && []; this.setState({ loading: false, @@ -567,7 +567,7 @@ export class Om extends React.Component, IOmdbInsightsSta const { request, controller } = AxiosGetHelper(openKeysEndpoint, cancelOpenKeysSignal) cancelOpenKeysSignal = controller request.then(openKeysResponse => { - const openKeys = openKeysResponse && openKeysResponse.data; + const openKeys = openKeysResponse?.data ?? {"fso": []}; let allopenKeysResponse: any[] = []; for (let key in openKeys) { if (Array.isArray(openKeys[key])) { @@ -614,7 +614,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletePendingSignal = controller; request.then(deletePendingKeysResponse => { - const deletePendingKeys = deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.deletedKeyInfo; + const deletePendingKeys = deletePendingKeysResponse?.data?.deletedKeyInfo ?? []; //Use Summation Logic iterate through all object and find sum of all datasize let deletedKeyInfoData = []; deletedKeyInfoData = deletePendingKeys && deletePendingKeys.flatMap((infoObject: any) => { @@ -714,7 +714,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletedKeysSignal = controller request.then(deletedKeysResponse => { let deletedContainerKeys = []; - deletedContainerKeys = deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.containers; + deletedContainerKeys = deletedKeysResponse?.data?.containers ?? []; this.setState({ loading: false, deletedContainerKeysDataSource: deletedContainerKeys @@ -748,7 +748,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletedPendingDirSignal = controller request.then(deletePendingDirResponse => { let deletedDirInfo = []; - deletedDirInfo = deletePendingDirResponse && deletePendingDirResponse.data && deletePendingDirResponse.data.deletedDirInfo; + deletedDirInfo = deletePendingDirResponse?.data?.deletedDirInfo ?? 
[]; this.setState({ loading: false, pendingDeleteDirDataSource: deletedDirInfo diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts index ddb2832f39b..1a079c5efa4 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts @@ -21,7 +21,7 @@ import { defineConfig, splitVendorChunkPlugin } from 'vite'; import { resolve } from 'path'; -import react from '@vitejs/plugin-react'; +import react from '@vitejs/plugin-react-swc'; function pathResolve(dir: string) { return resolve(__dirname, '.', dir) @@ -29,6 +29,12 @@ function pathResolve(dir: string) { // https://vitejs.dev/config/ export default defineConfig({ + plugins: [ + react({ + devTarget: "es2015" //SWC by default bypasses the build target, set dev target explicitly + }), + splitVendorChunkPlugin() + ], build: { target: "es2015", outDir: 'build', @@ -48,7 +54,6 @@ export default defineConfig({ } } }, - plugins: [react(), splitVendorChunkPlugin()], server: { proxy: { "/api": { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 82c7c1b5bef..da5484c9b89 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -62,10 +62,12 @@ import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO; import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates; import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers; import org.junit.jupiter.api.BeforeEach; @@ -121,6 +123,7 @@ public class TestContainerEndpoint { LoggerFactory.getLogger(TestContainerEndpoint.class); private OzoneStorageContainerManager ozoneStorageContainerManager; + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; private ReconContainerManager reconContainerManager; private ContainerStateManager containerStateManager; private ReconPipelineManager reconPipelineManager; @@ -198,6 +201,8 @@ private void initializeInjector() throws Exception { containerEndpoint = reconTestInjector.getInstance(ContainerEndpoint.class); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); + this.reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); pipeline = getRandomPipeline(); pipelineID = pipeline.getId(); @@ -472,6 +477,10 @@ public void testGetKeysForContainer() throws IOException { // Now to check if the ContainerEndpoint also reads the File table // Set up test data for FSO keys setUpFSOData(); + NSSummaryTaskWithFSO 
nSSummaryTaskWithFso = + new NSSummaryTaskWithFSO(reconNamespaceSummaryManager, + reconOMMetadataManager, new OzoneConfiguration()); + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); // Reprocess the container key mapper to ensure the latest mapping is used reprocessContainerKeyMapper(); response = containerEndpoint.getKeysForContainer(20L, -1, ""); @@ -556,6 +565,10 @@ public void testGetKeysForContainerWithPrevKey() throws IOException { setUpFSOData(); // Reprocess the container key mapper to ensure the latest mapping is used reprocessContainerKeyMapper(); + NSSummaryTaskWithFSO nSSummaryTaskWithFso = + new NSSummaryTaskWithFSO(reconNamespaceSummaryManager, + reconOMMetadataManager, new OzoneConfiguration()); + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); response = containerEndpoint.getKeysForContainer(20L, -1, "/0/1/2/file7"); // Ensure that the expected number of keys is returned @@ -886,7 +899,9 @@ public void testUnhealthyContainersFilteredResponse() throws IOException, TimeoutException { String missing = UnHealthyContainerStates.MISSING.toString(); String emptyMissing = UnHealthyContainerStates.EMPTY_MISSING.toString(); + String negativeSize = UnHealthyContainerStates.NEGATIVE_SIZE.toString(); // For NEGATIVE_SIZE state + // Initial empty response verification Response response = containerEndpoint .getUnhealthyContainers(missing, 1000, 1); @@ -899,44 +914,55 @@ public void testUnhealthyContainersFilteredResponse() assertEquals(0, responseObject.getMisReplicatedCount()); assertEquals(Collections.EMPTY_LIST, responseObject.getContainers()); + // Add unhealthy records putContainerInfos(5); uuid1 = newDatanode("host1", "127.0.0.1"); uuid2 = newDatanode("host2", "127.0.0.2"); uuid3 = newDatanode("host3", "127.0.0.3"); uuid4 = newDatanode("host4", "127.0.0.4"); createUnhealthyRecords(5, 4, 3, 2); - createEmptyMissingUnhealthyRecords(2); + createEmptyMissingUnhealthyRecords(2); // For EMPTY_MISSING state + createNegativeSizeUnhealthyRecords(2); // For NEGATIVE_SIZE state + // Check for unhealthy containers response = containerEndpoint.getUnhealthyContainers(missing, 1000, 1); responseObject = (UnhealthyContainersResponse) response.getEntity(); + // Summary should have the count for all unhealthy: assertEquals(5, responseObject.getMissingCount()); assertEquals(4, responseObject.getOverReplicatedCount()); assertEquals(3, responseObject.getUnderReplicatedCount()); assertEquals(2, responseObject.getMisReplicatedCount()); - Collection records - = responseObject.getContainers(); + Collection records = responseObject.getContainers(); assertTrue(records.stream() .flatMap(containerMetadata -> containerMetadata.getReplicas().stream() .map(ContainerHistory::getState)) .allMatch(s -> s.equals("UNHEALTHY"))); - // There should only be 5 missing containers and no others as we asked for - // only missing. + + // Verify only missing containers are returned assertEquals(5, records.size()); for (UnhealthyContainerMetadata r : records) { assertEquals(missing, r.getContainerState()); } + // Check for empty missing containers, should return zero Response filteredEmptyMissingResponse = containerEndpoint .getUnhealthyContainers(emptyMissing, 1000, 1); responseObject = (UnhealthyContainersResponse) filteredEmptyMissingResponse.getEntity(); records = responseObject.getContainers(); - // Assert for zero empty missing containers. 
+ assertEquals(0, records.size()); + + // Check for negative size containers, should return zero + Response filteredNegativeSizeResponse = containerEndpoint + .getUnhealthyContainers(negativeSize, 1000, 1); + responseObject = (UnhealthyContainersResponse) filteredNegativeSizeResponse.getEntity(); + records = responseObject.getContainers(); assertEquals(0, records.size()); } + @Test public void testUnhealthyContainersInvalidState() { WebApplicationException e = assertThrows(WebApplicationException.class, @@ -1043,6 +1069,15 @@ private void createEmptyMissingUnhealthyRecords(int emptyMissing) { } } + private void createNegativeSizeUnhealthyRecords(int negativeSize) { + int cid = 0; + for (int i = 0; i < negativeSize; i++) { + createUnhealthyRecord(++cid, UnHealthyContainerStates.NEGATIVE_SIZE.toString(), + 3, 3, 0, null); // Added for NEGATIVE_SIZE state + } + } + + private void createUnhealthyRecords(int missing, int overRep, int underRep, int misRep) { int cid = 0; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java new file mode 100644 index 00000000000..5f3d0fa1268 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java @@ -0,0 +1,549 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; +import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; +import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import javax.ws.rs.core.Response; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +/** + * Test class for DeletedKeysSearchEndpoint. + * + * This class tests various scenarios for searching deleted keys within a + * given volume, bucket, and directory structure. The tests include: + * + * 1. Test Root Level Search Restriction: Ensures searching at the root level returns a bad request. + * 2. Test Volume Level Search Restriction: Ensures searching at the volume level returns a bad request. + * 3. Test Bucket Level Search: Verifies search results within different types of buckets, both FSO and OBS. + * 4. Test Directory Level Search: Validates searching inside specific directories. + * 5. Test Key Level Search: Confirms search results for specific keys within buckets, both FSO and OBS. + * 6. Test Key Level Search Under Directory: Verifies searching for keys within nested directories. + * 7. Test Search Under Nested Directory: Checks search results within nested directories. + * 8. Test Limit Search: Tests the limit functionality of the search API. + * 9. Test Search Deleted Keys with Bad Request: Ensures bad requests with invalid params return correct responses. + * 10. 
Test Last Key in Response: Confirms the presence of the last key in paginated responses. + * 11. Test Search Deleted Keys with Pagination: Verifies paginated search results. + * 12. Test Search in Empty Bucket: Checks the response for searching within an empty bucket. + */ +public class TestDeletedKeysSearchEndpoint extends AbstractReconSqlDBTest { + + @TempDir + private Path temporaryFolder; + private ReconOMMetadataManager reconOMMetadataManager; + private OMDBInsightEndpoint omdbInsightEndpoint; + private OzoneConfiguration ozoneConfiguration; + private static final String ROOT_PATH = "/"; + private OMMetadataManager omMetadataManager; + + @BeforeEach + public void setUp() throws Exception { + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 100); + omMetadataManager = initializeNewOmMetadataManager( + Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")).toFile()); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + Files.createDirectory(temporaryFolder.resolve("OmMetataDir")).toFile()); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconSqlDb() + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class)) + .addBinding(OzoneStorageContainerManager.class, + ReconStorageContainerManagerFacade.class) + .withContainerDB() + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) + .addBinding(OMDBInsightEndpoint.class) + .addBinding(ContainerHealthSchemaManager.class) + .build(); + omdbInsightEndpoint = reconTestInjector.getInstance(OMDBInsightEndpoint.class); + populateOMDB(); + } + + + private static OMMetadataManager initializeNewOmMetadataManager(File omDbDir) throws IOException { + OzoneConfiguration omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, omDbDir.getAbsolutePath()); + return new OmMetadataManagerImpl(omConfiguration, null); + } + + @Test + public void testRootLevelSearchRestriction() throws IOException { + String rootPath = "/"; + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", rootPath); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + } + + @Test + public void testEmptySearchPrefix() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(100, "", ""); + // In this case we get all the keys from the OMDB + assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(16, result.getRepeatedOmKeyInfoList().size()); + + // Set limit to 10 and pass empty search prefix + response = omdbInsightEndpoint.getDeletedKeyInfo(10, "", ""); + // In this case we get all the keys from the OMDB + assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(10, result.getRepeatedOmKeyInfoList().size()); + } + + @Test + public void testVolumeLevelSearchRestriction() throws IOException { + String volumePath = "/vola"; + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", volumePath); + 
assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + + volumePath = "/volb"; + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", volumePath); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + } + + @Test + public void testBucketLevelSearch() throws IOException { + // Search inside FSO bucket + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volb/bucketb1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(7, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(2, "", "/volb/bucketb1"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + // Search inside OBS bucket + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(9, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/vola/nonexistentbucket"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testDirectoryLevelSearch() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(4, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc2"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(5, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volb/bucketb1/nonexistentdir"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testKeyLevelSearch() throws IOException { + // FSO Bucket key-level search + Response response = + omdbInsightEndpoint.getDeletedKeyInfo(10, "", "/volb/bucketb1/fileb1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + + response = + omdbInsightEndpoint.getDeletedKeyInfo(10, "", "/volb/bucketb1/fileb2"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + + // Test with 
non-existent key + response = omdbInsightEndpoint.getDeletedKeyInfo(1, "", "/volb/bucketb1/nonexistentfile"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), + response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testKeyLevelSearchUnderDirectory() throws IOException { + // FSO Bucket key-level search under directory + Response response = + omdbInsightEndpoint.getDeletedKeyInfo(10, "", "/volb/bucketb1/dir1/file1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(10, "", + "/volb/bucketb1/dir1/nonexistentfile"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), + response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testSearchUnderNestedDirectory() throws IOException { + // OBS Bucket nested directory search + Response response = + omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(4, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1/dirc11"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1/dirc11/dirc111"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1/dirc11/dirc111/nonexistentfile"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1/dirc11/nonexistentfile"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testLimitSearch() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(2, "", "/volb/bucketb1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + } + + @Test + public void testSearchDeletedKeysWithBadRequest() throws IOException { + int negativeLimit = -1; + Response response = omdbInsightEndpoint.getDeletedKeyInfo(negativeLimit, "", "@323232"); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or 
deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "///"); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + } + + @Test + public void testLastKeyInResponse() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volb/bucketb1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(7, result.getRepeatedOmKeyInfoList().size()); + + // Compute the expected last key from the last entry in the result list + String computedLastKey = "/" + + result.getRepeatedOmKeyInfoList().get(6).getOmKeyInfoList().get(0).getVolumeName() + "/" + + result.getRepeatedOmKeyInfoList().get(6).getOmKeyInfoList().get(0).getBucketName() + "/" + + result.getRepeatedOmKeyInfoList().get(6).getOmKeyInfoList().get(0).getKeyName() + "/"; + + // Check that the last key in the response starts with the expected value + assertTrue(result.getLastKey().startsWith(computedLastKey)); + } + + @Test + public void testSearchDeletedKeysWithPagination() throws IOException { + String startPrefix = "/volb/bucketb1"; + int limit = 2; + String prevKey = ""; + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + prevKey = result.getLastKey(); + assertNotNull(prevKey, "Last key should not be null"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + prevKey = result.getLastKey(); + assertNotNull(prevKey, "Last key should not be null"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + prevKey = result.getLastKey(); + assertNotNull(prevKey, "Last key should not be null"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + // Compute the expected last key from the last entry in the result list + String computedLastKey = "/" + + result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0) + .getVolumeName() + "/" + + result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0) + .getBucketName() + "/" + + result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0) + .getKeyName() + "/"; + + // Check that the last key in the response starts with the expected value + assertTrue(result.getLastKey().startsWith(computedLastKey)); + } + + @Test + public void testSearchInEmptyBucket() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volb/bucketb2"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), 
response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testPrevKeyProvidedStartPrefixEmpty() throws IOException { + // Case 1: prevKey provided, startPrefix empty + // Seek to the prevKey, skip the first matching record, then return remaining records until limit is reached. + String prevKey = "/volb/bucketb1/fileb3"; // This key exists, will skip it + int limit = 3; + String startPrefix = ""; // Empty startPrefix + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + + // Assert that we get the next 3 records after skipping the prevKey + assertEquals(3, result.getRepeatedOmKeyInfoList().size()); + assertEquals("fileb4", result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testPrevKeyEmptyStartPrefixEmpty() throws IOException { + // Case 2: prevKey empty, startPrefix empty + // No need to seek, start from the first record and return records until limit is reached. + String prevKey = ""; // Empty prevKey + int limit = 100; + String startPrefix = ""; // Empty startPrefix + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + + // Assert that we get all the 16 records currently in the deleted keys table + assertEquals(16, result.getRepeatedOmKeyInfoList().size()); + } + + @Test + public void testPrevKeyEmptyStartPrefixProvided() throws IOException { + // Case 3: prevKey empty, startPrefix provided + // Seek to the startPrefix and return matching records until limit is reached. + String prevKey = ""; // Empty prevKey + int limit = 2; + String startPrefix = "/volb/bucketb1/fileb"; // Seek to startPrefix and match files + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + + // Assert that we get the first 2 records that match startPrefix + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + assertEquals("fileb1", result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testPrevKeyProvidedStartPrefixProvided() throws IOException { + // Case 4: prevKey provided, startPrefix provided + // Seek to the prevKey, skip it, and return remaining records matching startPrefix until limit is reached. + String prevKey = "/volb/bucketb1/fileb2"; // This key exists, will skip it + int limit = 3; + String startPrefix = "/volb/bucketb1"; // Matching prefix + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + + // Assert that we get the next 2 records that match startPrefix after skipping prevKey having fileb2 + assertEquals(3, result.getRepeatedOmKeyInfoList().size()); + assertEquals("fileb3", result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + } + + + /** + * Populates the OMDB with a set of deleted keys for testing purposes. 
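A minimal paging sketch of the pattern the pagination and prevKey/startPrefix tests above exercise, assuming only the OMDBInsightEndpoint, Response and KeyInsightInfoResponse types already used in this test: each page's lastKey is fed back as the next prevKey until a short page or a non-OK status ends the scan.

  private static int countDeletedKeys(OMDBInsightEndpoint endpoint, String startPrefix, int pageSize)
      throws IOException {
    int total = 0;
    String prevKey = "";
    while (true) {
      Response page = endpoint.getDeletedKeyInfo(pageSize, prevKey, startPrefix);
      if (page.getStatus() != Response.Status.OK.getStatusCode()) {
        break;                               // NO_CONTENT or BAD_REQUEST: nothing more to read
      }
      KeyInsightInfoResponse body = (KeyInsightInfoResponse) page.getEntity();
      total += body.getRepeatedOmKeyInfoList().size();
      String last = body.getLastKey();
      if (body.getRepeatedOmKeyInfoList().size() < pageSize || last == null || last.isEmpty()) {
        break;                               // short page (or nothing to resume from): done
      }
      prevKey = last;                        // resume after the last key returned
    }
    return total;
  }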
+ * This diagram is for reference: + * * root + * ├── volb (Total Size: 7000KB) + * │ ├── bucketb1 (Total Size: 7000KB) + * │ │ ├── fileb1 (Size: 1000KB) + * │ │ ├── fileb2 (Size: 1000KB) + * │ │ ├── fileb3 (Size: 1000KB) + * │ │ ├── fileb4 (Size: 1000KB) + * │ │ ├── fileb5 (Size: 1000KB) + * │ │ ├── dir1 (Total Size: 2000KB) + * │ │ │ ├── file1 (Size: 1000KB) + * │ │ │ └── file2 (Size: 1000KB) + * ├── volc (Total Size: 9000KB) + * │ ├── bucketc1 (Total Size: 9000KB) + * │ │ ├── dirc1 (Total Size: 4000KB) + * │ │ │ ├── filec1 (Size: 1000KB) + * │ │ │ ├── filec2 (Size: 1000KB) + * │ │ │ ├── dirc11 (Total Size: 2000KB) + * │ │ │ ├── filec11 (Size: 1000KB) + * │ │ │ └── dirc111 (Total Size: 1000KB) + * │ │ │ └── filec111 (Size: 1000KB) + * │ │ ├── dirc2 (Total Size: 5000KB) + * │ │ │ ├── filec3 (Size: 1000KB) + * │ │ │ ├── filec4 (Size: 1000KB) + * │ │ │ ├── filec5 (Size: 1000KB) + * │ │ │ ├── filec6 (Size: 1000KB) + * │ │ │ └── filec7 (Size: 1000KB) + * + * @throws Exception if an error occurs while creating deleted keys. + */ + private void populateOMDB() throws Exception { + + createDeletedKey("fileb1", "bucketb1", "volb", 1000); + createDeletedKey("fileb2", "bucketb1", "volb", 1000); + createDeletedKey("fileb3", "bucketb1", "volb", 1000); + createDeletedKey("fileb4", "bucketb1", "volb", 1000); + createDeletedKey("fileb5", "bucketb1", "volb", 1000); + + createDeletedKey("dir1/file1", "bucketb1", "volb", 1000); + createDeletedKey("dir1/file2", "bucketb1", "volb", 1000); + + createDeletedKey("dirc1/filec1", "bucketc1", "volc", 1000); + createDeletedKey("dirc1/filec2", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec3", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec4", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec5", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filgetec6", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec7", "bucketc1", "volc", 1000); + + // create nested directories and files in bucketc1 + createDeletedKey("dirc1/dirc11/filec11", "bucketc1", "volc", 1000); + createDeletedKey("dirc1/dirc11/dirc111/filec111", "bucketc1", "volc", 1000); + } + + private void createDeletedKey(String keyName, String bucketName, + String volumeName, long dataSize) throws IOException { + // Construct the deleted key path + String deletedKey = "/" + volumeName + "/" + bucketName + "/" + keyName + "/" + + UUID.randomUUID().getMostSignificantBits(); + + // Create a list to hold OmKeyInfo objects + List omKeyInfos = new ArrayList<>(); + + // Build OmKeyInfo object + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setDataSize(dataSize) + .setObjectID(UUID.randomUUID().getMostSignificantBits()) + .setReplicationConfig(StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .build(); + + // Add the OmKeyInfo object to the list + omKeyInfos.add(omKeyInfo); + + // Create a RepeatedOmKeyInfo object with the list of OmKeyInfo + RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfos); + + // Write the deleted key information to the OM metadata manager + writeDeletedKeysToOm(reconOMMetadataManager, deletedKey, repeatedOmKeyInfo); + } + + /** + * Writes deleted key information to the Ozone Manager metadata table. 
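For reference, the counts asserted in the bucket- and directory-level tests above follow directly from this population: /volb/bucketb1 holds 7 deleted keys (fileb1 through fileb5, plus dir1/file1 and dir1/file2); /volc/bucketc1 holds 9 (4 under dirc1, counting the nested dirc11/filec11 and dirc11/dirc111/filec111, plus 5 under dirc2); and an empty search prefix therefore returns 7 + 9 = 16 entries.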
+ * @param omMetadataManager the Ozone Manager metadata manager + * @param deletedKey the name of the deleted key + * @param repeatedOmKeyInfo the RepeatedOmKeyInfo object containing key information + * @throws IOException if there is an error accessing the metadata table + */ + public static void writeDeletedKeysToOm(OMMetadataManager omMetadataManager, + String deletedKey, + RepeatedOmKeyInfo repeatedOmKeyInfo) throws IOException { + // Put the deleted key information into the deleted table + omMetadataManager.getDeletedTable().put(deletedKey, repeatedOmKeyInfo); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index c3d2fd484a5..f1dafa2c75b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -378,7 +378,6 @@ public void setUp() throws Exception { .setDatanodeDetails(datanodeDetailsProto) .setVersion("0.6.0") .setSetupTime(1596347628802L) - .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); StorageReportProto storageReportProto1 = @@ -409,7 +408,6 @@ public void setUp() throws Exception { .setDatanodeDetails(datanodeDetailsProto2) .setVersion("0.6.0") .setSetupTime(1596347636802L) - .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); StorageReportProto storageReportProto3 = @@ -441,7 +439,6 @@ public void setUp() throws Exception { .setDatanodeDetails(datanodeDetailsProto3) .setVersion("0.6.0") .setSetupTime(1596347628802L) - .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); StorageReportProto storageReportProto5 = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index 54da926601e..9cda6d6e451 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -89,6 +89,7 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.anyLong; @@ -791,8 +792,9 @@ public void testConstructFullPath() throws IOException { .setParentObjectID(DIR_TWO_OBJECT_ID) .build(); // Call constructFullPath and verify the result - fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager, reconOMMetadataManager); + OmKeyInfo finalKeyInfo = keyInfo; + assertThrows(ServiceNotReadyException.class, () -> ReconUtils.constructFullPath(finalKeyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager)); } @Test @@ -813,8 +815,8 @@ public void testConstructFullPathWithNegativeParentIdTriggersRebuild() throws IO .setParentObjectID(dirOneObjectId) .build(); - String result = ReconUtils.constructFullPath(keyInfo, mockSummaryManager, mockMetadataManager); - assertEquals("", result, 
"Expected an empty string return due to rebuild trigger"); + assertThrows(ServiceNotReadyException.class, () -> + ReconUtils.constructFullPath(keyInfo, mockSummaryManager, mockMetadataManager)); } @Test @@ -836,7 +838,8 @@ public void testLoggingWhenParentIdIsNegative() throws IOException { .setParentObjectID(1L) .build(); - ReconUtils.constructFullPath(keyInfo, mockManager, null); + assertThrows(ServiceNotReadyException.class, () -> + ReconUtils.constructFullPath(keyInfo, mockManager, null)); // Assert ArgumentCaptor logCaptor = ArgumentCaptor.forClass(String.class); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 74c58cd9d38..61a9711876e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -37,10 +37,11 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.ListKeysResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; @@ -62,6 +63,7 @@ import org.junit.jupiter.api.io.TempDir; import javax.ws.rs.core.Response; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.sql.Timestamp; @@ -88,6 +90,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -216,6 +219,7 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest { private static final long KEY_TWENTY_TWO_OBJECT_ID = 37L; private static final long KEY_TWENTY_THREE_OBJECT_ID = 38L; private static final long KEY_TWENTY_FOUR_OBJECT_ID = 39L; + private static final long KEY_TWENTY_FIVE_OBJECT_ID = 42L; private static final long EMPTY_OBS_BUCKET_OBJECT_ID = 40L; private static final long EMPTY_FSO_BUCKET_OBJECT_ID = 41L; @@ -241,6 +245,7 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest { private static final long KEY_SEVENTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long KEY_EIGHTEEN_SIZE = OzoneConsts.KB + 1; // bin 1 private static final long KEY_NINETEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_TWENTY_SIZE = OzoneConsts.KB + 1; // bin 1 private static final String OBS_BUCKET_PATH = "/volume1/obs-bucket"; private static final String FSO_BUCKET_PATH = "/volume1/fso-bucket"; @@ -893,11 +898,11 @@ public void testGetOpenKeyInfo() throws Exception { .get("/sampleVol/bucketOne/key_one"); assertEquals("key_one", omKeyInfo1.getKeyName()); Response openKeyInfoResp = - 
omdbInsightEndpoint.getOpenKeyInfo(-1, "", true, true); + omdbInsightEndpoint.getOpenKeyInfo(-1, "", "", true, true); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); - assertEquals("key_one", + assertEquals("sampleVol/bucketOne/key_one", keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); } @@ -1040,7 +1045,7 @@ public void testGetOpenKeyInfoLimitParam() throws Exception { reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) .put("/sampleVol/bucketOne/key_three", omKeyInfo3); Response openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(2, "", true, true); + omdbInsightEndpoint.getOpenKeyInfo(2, "", "", true, true); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1049,10 +1054,10 @@ public void testGetOpenKeyInfoLimitParam() throws Exception { assertEquals(0, keyInsightInfoResp.getFsoKeyInfoList().size()); assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + keyInsightInfoResp.getNonFSOKeyInfoList().size()); - assertEquals("key_three", + assertEquals("sampleVol/bucketOne/key_three", keyInsightInfoResp.getNonFSOKeyInfoList().get(1).getPath()); - openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(3, "", true, true); + openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(3, "", "", true, true); keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1061,7 +1066,7 @@ public void testGetOpenKeyInfoLimitParam() throws Exception { assertEquals(1, keyInsightInfoResp.getFsoKeyInfoList().size()); assertEquals(3, keyInsightInfoResp.getFsoKeyInfoList().size() + keyInsightInfoResp.getNonFSOKeyInfoList().size()); - assertEquals("key_three", + assertEquals("sampleVol/bucketOne/key_three", keyInsightInfoResp.getNonFSOKeyInfoList().get(1).getPath()); } @@ -1103,7 +1108,7 @@ public void testGetOpenKeyInfoWithIncludeFsoAndIncludeNonFsoParams() // CASE 1 :- Display only FSO keys in response // includeFsoKeys=true, includeNonFsoKeys=false Response openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(10, "", true, false); + omdbInsightEndpoint.getOpenKeyInfo(10, "", "", true, false); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1115,7 +1120,7 @@ public void testGetOpenKeyInfoWithIncludeFsoAndIncludeNonFsoParams() // CASE 2 :- Display only Non-FSO keys in response // includeFsoKeys=false, includeNonFsoKeys=true openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(10, "", false, true); + omdbInsightEndpoint.getOpenKeyInfo(10, "", "", false, true); keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); assertEquals(0, @@ -1126,7 +1131,7 @@ public void testGetOpenKeyInfoWithIncludeFsoAndIncludeNonFsoParams() // CASE 3 :- Display both FSO and Non-FSO keys in response // includeFsoKeys=true, includeNonFsoKeys=true openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(10, "", true, true); + omdbInsightEndpoint.getOpenKeyInfo(10, "", "", true, true); keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); assertEquals(4, @@ -1137,45 +1142,39 @@ public void testGetOpenKeyInfoWithIncludeFsoAndIncludeNonFsoParams() // CASE 4 :- Don't Display both FSO and Non-FSO keys in response // includeFsoKeys=false, includeNonFsoKeys=false openKeyInfoResp = - 
omdbInsightEndpoint.getOpenKeyInfo(10, "", false, false); - keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); - assertNotNull(keyInsightInfoResp); - assertEquals(0, - keyInsightInfoResp.getFsoKeyInfoList().size()); - assertEquals(0, - keyInsightInfoResp.getNonFSOKeyInfoList().size()); + omdbInsightEndpoint.getOpenKeyInfo(10, "", "", false, false); + assertEquals(204, openKeyInfoResp.getStatus()); + String entity = (String) openKeyInfoResp.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); } @Test public void testGetOpenKeyInfoPrevKeyParam() throws Exception { OmKeyInfo omKeyInfo1 = - getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + getOmKeyInfo("sampleVol", "bucketOne", "key_1", true); OmKeyInfo omKeyInfo2 = - getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + getOmKeyInfo("sampleVol", "bucketOne", "key_2", true); OmKeyInfo omKeyInfo3 = - getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); + getOmKeyInfo("sampleVol", "bucketOne", "key_3", true); reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) - .put("/sampleVol/bucketOne/key_one", omKeyInfo1); + .put("/sampleVol/bucketOne/key_1", omKeyInfo1); reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED) - .put("/sampleVol/bucketOne/key_two", omKeyInfo2); + .put("/sampleVol/bucketOne/key_2", omKeyInfo2); reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) - .put("/sampleVol/bucketOne/key_three", omKeyInfo3); + .put("/sampleVol/bucketOne/key_3", omKeyInfo3); Response openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(-1, "/sampleVol/bucketOne/key_one", + omdbInsightEndpoint.getOpenKeyInfo(-1, "/sampleVol/bucketOne/key_1", "", true, true); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); - assertEquals(1, - keyInsightInfoResp.getNonFSOKeyInfoList().size()); + assertEquals(1, keyInsightInfoResp.getNonFSOKeyInfoList().size()); assertEquals(1, keyInsightInfoResp.getFsoKeyInfoList().size()); - assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + - keyInsightInfoResp.getNonFSOKeyInfoList().size()); - assertEquals("key_three", - keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); - assertEquals("key_two", - keyInsightInfoResp.getFsoKeyInfoList().get(0).getPath()); + assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + assertEquals("sampleVol/bucketOne/key_3", keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); + assertEquals("sampleVol/bucketOne/key_2", keyInsightInfoResp.getFsoKeyInfoList().get(0).getPath()); } @Test @@ -1212,7 +1211,7 @@ public void testGetDeletedKeyInfoLimitParam() throws Exception { reconOMMetadataManager.getDeletedTable() .put("/sampleVol/bucketOne/key_three", repeatedOmKeyInfo3); - Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, ""); + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, "", ""); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1244,7 +1243,7 @@ public void testGetDeletedKeyInfoPrevKeyParam() throws Exception { .put("/sampleVol/bucketOne/key_three", repeatedOmKeyInfo3); Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, - "/sampleVol/bucketOne/key_one"); + "/sampleVol/bucketOne/key_one", ""); KeyInsightInfoResponse keyInsightInfoResp = 
(KeyInsightInfoResponse) deletedKeyInfo.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1278,7 +1277,7 @@ public void testGetDeletedKeyInfo() throws Exception { .get("/sampleVol/bucketOne/key_one"); assertEquals("key_one", repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName()); - Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, ""); + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, "", ""); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1287,6 +1286,128 @@ public void testGetDeletedKeyInfo() throws Exception { .get(0).getKeyName()); } + @Test + public void testGetDeletedKeysWithPrevKeyProvidedAndStartPrefixEmpty() + throws Exception { + // Prepare mock data in the deletedTable. + for (int i = 1; i <= 10; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/deleted_key_" + i, + new RepeatedOmKeyInfo(omKeyInfo)); + } + + // Case 1: prevKey provided, startPrefix empty + Response deletedKeyInfoResponse = omdbInsightEndpoint.getDeletedKeyInfo(5, + "/sampleVol/bucketOne/deleted_key_3", ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResponse.getEntity(); + + // Validate that the response skips the prevKey and returns subsequent records. + assertNotNull(keyInsightInfoResp); + assertEquals(5, keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + assertEquals("deleted_key_4", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + assertEquals("deleted_key_8", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(4).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testGetDeletedKeysWithPrevKeyEmptyAndStartPrefixEmpty() + throws Exception { + // Prepare mock data in the deletedTable. + for (int i = 1; i < 10; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + + // Case 2: prevKey empty, startPrefix empty + Response deletedKeyInfoResponse = + omdbInsightEndpoint.getDeletedKeyInfo(5, "", ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResponse.getEntity(); + + // Validate that the response retrieves from the beginning. + assertNotNull(keyInsightInfoResp); + assertEquals(5, keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + assertEquals("deleted_key_1", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + assertEquals("deleted_key_5", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(4).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testGetDeletedKeysWithStartPrefixProvidedAndPrevKeyEmpty() + throws Exception { + // Prepare mock data in the deletedTable. 
+ for (int i = 1; i < 5; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + for (int i = 5; i < 10; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketTwo", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketTwo/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + + // Case 3: startPrefix provided, prevKey empty + Response deletedKeyInfoResponse = + omdbInsightEndpoint.getDeletedKeyInfo(5, "", + "/sampleVol/bucketOne/"); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResponse.getEntity(); + + // Validate that the response retrieves starting from the prefix. + assertNotNull(keyInsightInfoResp); + assertEquals(4, keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + assertEquals("deleted_key_1", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + assertEquals("deleted_key_4", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(3).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testGetDeletedKeysWithBothPrevKeyAndStartPrefixProvided() + throws IOException { + // Prepare mock data in the deletedTable. + for (int i = 1; i < 10; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + for (int i = 10; i < 15; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketTwo", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketTwo/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + + // Case 4: startPrefix and prevKey provided + Response deletedKeyInfoResponse = + omdbInsightEndpoint.getDeletedKeyInfo(5, + "/sampleVol/bucketOne/deleted_key_5", + "/sampleVol/bucketOne/"); + + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResponse.getEntity(); + + // Validate that the response retrieves starting from the prefix and skips the prevKey. 
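The four prevKey/startPrefix combinations covered here and in TestDeletedKeysSearchEndpoint reduce to one ordered-scan rule: seek to whichever of prevKey or startPrefix sorts later, skip an exact prevKey match, and return keys while they still carry the startPrefix, up to the limit. A minimal, illustrative sketch of that rule over a plain java.util sorted map (a stand-in for the RocksDB-backed deleted table, not the endpoint's actual implementation):

  static List<String> scanDeletedKeys(NavigableMap<String, ?> deletedTable,
      String startPrefix, String prevKey, int limit) {
    List<String> out = new ArrayList<>();
    // Seek to whichever of prevKey/startPrefix sorts later, then walk keys in order.
    String seekKey = prevKey.compareTo(startPrefix) > 0 ? prevKey : startPrefix;
    for (String key : deletedTable.tailMap(seekKey, true).keySet()) {
      if (key.equals(prevKey)) {
        continue;                            // the prevKey itself is excluded from the next page
      }
      if (!startPrefix.isEmpty() && !key.startsWith(startPrefix)) {
        break;                               // walked past the requested prefix range
      }
      out.add(key);
      if (out.size() >= limit) {
        break;
      }
    }
    return out;
  }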
+ assertNotNull(keyInsightInfoResp); + assertEquals(4, keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + assertEquals("deleted_key_6", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + assertEquals("deleted_key_9", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(3).getOmKeyInfoList().get(0).getKeyName()); + } + + private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, String keyName, boolean isFile) { return new OmKeyInfo.Builder() @@ -1456,7 +1577,7 @@ public void testListKeysFSOBucket() { "", 1000); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(6, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath()); assertEquals("/1/10/11/file1", keyEntityInfo.getKey()); assertEquals("/1/10/13/testfile", listKeysResponse.getLastKey()); @@ -1488,7 +1609,7 @@ public void testListKeysFSOBucketWithLimitAndPagination() { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath()); assertEquals("/1/10/11/testfile", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1530,7 +1651,7 @@ public void testListKeysFSOBucketDirOnePathWithLimitTwoAndPagination() { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath()); assertEquals("/1/10/11/testfile", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1572,7 +1693,7 @@ public void testListKeysFSOBucketDirOnePathWithLimitOneAndPagination() { "", 1); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(1, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath()); assertEquals("/1/10/11/file1", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1623,7 +1744,7 @@ public void testListKeysFSOBucketTwoPathWithLimitAcrossDirsAtBucketLevel() { "", 3); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(3, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket2/dir8/file1", keyEntityInfo.getPath()); assertEquals("/1/30/32/file1", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1656,7 +1777,7 @@ public void 
testListKeysFSOBucketDirTwoPathWithLimitAndPagination() { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/dir2/file1", keyEntityInfo.getPath()); assertEquals("/1/10/12/testfile", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1689,7 +1810,7 @@ public void testListKeysFSOBucketDirThreePathWithLimitAndPagination() { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/dir2/dir3/file1", keyEntityInfo.getPath()); assertEquals("/1/10/13/testfile", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1776,7 +1897,7 @@ public void testListKeysOBSBucketWithLimitAndPagination() throws Exception { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/obs-bucket/key1", keyEntityInfo.getPath()); assertEquals("/volume1/obs-bucket/key1/key2", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1817,6 +1938,18 @@ public void testListKeysForEmptyOBSBucket() { assertEquals("", listKeysResponse.getLastKey()); } + @Test + public void testListKeysWhenNSSummaryNotInitialized() throws Exception { + reconNamespaceSummaryManager.clearNSSummaryTable(); + // bucket level DU + Response bucketResponse = + omdbInsightEndpoint.listKeys("RATIS", "", 0, FSO_BUCKET_TWO_PATH, + "", 1000); + ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.INITIALIZING, listKeysResponse.getStatus()); + assertEquals(Response.Status.SERVICE_UNAVAILABLE.getStatusCode(), bucketResponse.getStatus()); + } + @Test public void testListKeysForEmptyFSOBucket() { Response bucketResponse = omdbInsightEndpoint.listKeys("RATIS", "", 0, EMPTY_FSO_BUCKET_PATH, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java index f64d93707a2..e320c19069e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java @@ -300,7 +300,6 @@ public void setUp() throws Exception { .setDatanodeDetails(datanodeDetailsProto) .setVersion("0.6.0") .setSetupTime(1596347628802L) - .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java 
similarity index 81% rename from hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java rename to hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java index ab16f349af2..f55d988cfe0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java @@ -81,12 +81,12 @@ * 11. Test Search Open Keys with Pagination: Verifies paginated search results. * 12. Test Search in Empty Bucket: Checks the response for searching within an empty bucket. */ -public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest { +public class TestOpenKeysSearchEndpoint extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; private ReconOMMetadataManager reconOMMetadataManager; - private OMDBInsightSearchEndpoint omdbInsightSearchEndpoint; + private OMDBInsightEndpoint omdbInsightEndpoint; private OzoneConfiguration ozoneConfiguration; private static final String ROOT_PATH = "/"; private static final String TEST_USER = "TestUser"; @@ -97,11 +97,9 @@ public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest { @BeforeEach public void setUp() throws Exception { ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, - 100); + ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 100); omMetadataManager = initializeNewOmMetadataManager( - Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")) - .toFile()); + Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")).toFile()); reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, Files.createDirectory(temporaryFolder.resolve("OmMetataDir")).toFile()); @@ -118,11 +116,8 @@ public void setUp() throws Exception { .addBinding(OMDBInsightEndpoint.class) .addBinding(ContainerHealthSchemaManager.class) .build(); - reconNamespaceSummaryManager = - reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); - omdbInsightSearchEndpoint = reconTestInjector.getInstance( - OMDBInsightSearchEndpoint.class); - + reconNamespaceSummaryManager = reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + omdbInsightEndpoint = reconTestInjector.getInstance(OMDBInsightEndpoint.class); // populate OM DB and reprocess into Recon RocksDB populateOMDB(); NSSummaryTaskWithFSO nSSummaryTaskWithFso = @@ -152,26 +147,19 @@ private static OMMetadataManager initializeNewOmMetadataManager( public void testRootLevelSearchRestriction() throws IOException { // Test with root level path String rootPath = "/"; - Response response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); + Response response = + omdbInsightEndpoint.getOpenKeyInfo(-1, "", rootPath, true, true); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), "Expected a message indicating the path must be at the bucket level or deeper"); - - // Test with root level path without trailing slash - rootPath = ""; - response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); - assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); - entity = (String) response.getEntity(); - assertTrue(entity.contains("Invalid 
startPrefix: Path must be at the bucket level or deeper"), - "Expected a message indicating the path must be at the bucket level or deeper"); } @Test public void testVolumeLevelSearchRestriction() throws IOException { // Test with volume level path String volumePath = "/vola"; - Response response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); + Response response = omdbInsightEndpoint.getOpenKeyInfo(20, "", volumePath, true, true); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -179,7 +167,7 @@ public void testVolumeLevelSearchRestriction() throws IOException { // Test with another volume level path volumePath = "/volb"; - response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); + response = omdbInsightEndpoint.getOpenKeyInfo(20, "", volumePath, true, true); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -190,7 +178,7 @@ public void testVolumeLevelSearchRestriction() throws IOException { public void testBucketLevelSearch() throws IOException { // Search inside FSO bucket Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -202,7 +190,7 @@ public void testBucketLevelSearch() throws IOException { // Search inside OBS bucket response = - omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/volb/bucketb1", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -214,14 +202,14 @@ public void testBucketLevelSearch() throws IOException { // Search Inside LEGACY bucket response = - omdbInsightSearchEndpoint.searchOpenKeys("/volc/bucketc1", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/volc/bucketc1", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(7, result.getNonFSOKeyInfoList().size()); // Test with bucket that does not exist - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/nonexistentbucket", 20, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/nonexistentbucket", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -230,7 +218,7 @@ public void testBucketLevelSearch() throws IOException { @Test public void testDirectoryLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1/dira1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -241,7 +229,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, 
result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1/dira2", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -252,7 +240,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1/dira3", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -263,8 +251,8 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(10000 * 3, result.getReplicatedDataSize()); // Test with non-existent directory - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/nonexistentdir", 20, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1/nonexistentdir", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -273,7 +261,7 @@ public void testDirectoryLevelSearch() throws IOException { @Test public void testKeyLevelSearch() throws IOException { // FSO Bucket key-level search - Response response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea1", 10, ""); + Response response = omdbInsightEndpoint.getOpenKeyInfo(10, "", "/vola/bucketa1/filea1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); @@ -282,7 +270,7 @@ public void testKeyLevelSearch() throws IOException { assertEquals(1000, result.getUnreplicatedDataSize()); assertEquals(1000 * 3, result.getReplicatedDataSize()); - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea2", 10, ""); + response = omdbInsightEndpoint.getOpenKeyInfo(10, "", "/vola/bucketa1/filea2", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); @@ -292,7 +280,8 @@ public void testKeyLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); // OBS Bucket key-level search - response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb1", 10, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/volb/bucketb1/fileb1", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(0, result.getFsoKeyInfoList().size()); @@ -301,7 +290,8 @@ public void testKeyLevelSearch() throws IOException { assertEquals(1000, result.getUnreplicatedDataSize()); assertEquals(1000 * 3, result.getReplicatedDataSize()); - response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb2", 10, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/volb/bucketb1/fileb2", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(0, result.getFsoKeyInfoList().size()); @@ -311,14 +301,16 @@ public void testKeyLevelSearch() throws IOException { 
assertEquals(1000 * 3, result.getReplicatedDataSize()); // Test with non-existent key - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/nonexistentfile", 1, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/volb/bucketb1/nonexistentfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); - response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/nonexistentfile", 1, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/volb/bucketb1/nonexistentfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -328,30 +320,32 @@ public void testKeyLevelSearch() throws IOException { @Test public void testKeyLevelSearchUnderDirectory() throws IOException { // FSO Bucket key-level search - Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1/innerfile", 10, ""); + Response response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/vola/bucketa1/dira1/innerfile", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); - response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2/innerfile", 10, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/vola/bucketa1/dira2/innerfile", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Test for unknown file in fso bucket - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1/unknownfile", 10, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/vola/bucketa1/dira1/unknownfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); // Test for unknown file in fso bucket - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2/unknownfile", 10, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/vola/bucketa1/dira2/unknownfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -360,56 +354,56 @@ public void testKeyLevelSearchUnderDirectory() throws IOException { @Test public void testSearchUnderNestedDirectory() throws IOException { - Response response = 
omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3", 20, - ""); + Response response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(10, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search under dira31 - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3/dira31", - 20, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(6, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search under dira32 - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32", 20, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(3, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search under dira33 - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32/dira33", 20, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32/dira33", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search for the exact file under dira33 - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32/dira33/file33_1", 20, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32/dira33/file33_1", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search for a non existant file under each nested directory - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32/dira33/nonexistentfile", 20, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32/dira33/nonexistentfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32/nonexistentfile", 20, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32/nonexistentfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -418,7 +412,7 @@ public void testSearchUnderNestedDirectory() throws IOException { @Test public void 
testLimitSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 2, ""); + omdbInsightEndpoint.getOpenKeyInfo(2, "", "/vola/bucketa1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -430,8 +424,8 @@ public void testLimitSearch() throws IOException { public void testSearchOpenKeysWithBadRequest() throws IOException { // Give a negative limit int negativeLimit = -1; - Response response = omdbInsightSearchEndpoint.searchOpenKeys("@323232", negativeLimit, ""); - + Response response = omdbInsightEndpoint + .getOpenKeyInfo(negativeLimit, "", "@323232", true, true); // Then the response should indicate that the request was bad assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus(), "Expected a 400 BAD REQUEST status"); @@ -440,7 +434,7 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), "Expected a message indicating the path must be at the bucket level or deeper"); - response = omdbInsightSearchEndpoint.searchOpenKeys("///", 20, ""); + response = omdbInsightEndpoint.getOpenKeyInfo(20, "", "///", true, true); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -449,8 +443,8 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { @Test public void testLastKeyInResponse() throws IOException { - Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20, ""); + Response response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/volb/bucketb1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -472,7 +466,7 @@ public void testSearchOpenKeysWithPagination() throws IOException { String prevKey = ""; // Perform the first search request - Response response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + Response response = omdbInsightEndpoint.getOpenKeyInfo(limit, prevKey, startPrefix, true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(2, result.getNonFSOKeyInfoList().size()); @@ -483,7 +477,7 @@ public void testSearchOpenKeysWithPagination() throws IOException { assertNotNull(prevKey, "Last key should not be null"); // Perform the second search request using the last key - response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + response = omdbInsightEndpoint.getOpenKeyInfo(limit, prevKey, startPrefix, true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(2, result.getNonFSOKeyInfoList().size()); @@ -494,7 +488,7 @@ public void testSearchOpenKeysWithPagination() throws IOException { assertNotNull(prevKey, "Last key should not be null"); // Perform the third search request using the last key - response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + response = omdbInsightEndpoint.getOpenKeyInfo(limit, prevKey, startPrefix, true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getNonFSOKeyInfoList().size()); @@ -506,13 
+500,61 @@ public void testSearchOpenKeysWithPagination() throws IOException { @Test public void testSearchInEmptyBucket() throws IOException { // Search in empty bucket bucketb2 - Response response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb2", 20, ""); - assertEquals(404, response.getStatus()); + Response response = omdbInsightEndpoint.getOpenKeyInfo(20, "", "/volb/bucketb2", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); } + @Test + public void testSearchWithPrevKeyOnly() throws IOException { + String prevKey = "/volb/bucketb1/fileb1"; // Key exists in volb/bucketb1 + Response response = omdbInsightEndpoint.getOpenKeyInfo(4, prevKey, "", true, true); + + assertEquals(200, response.getStatus()); + + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(4, result.getNonFSOKeyInfoList().size(), "Expected 4 remaining keys after 'fileb1'"); + assertEquals("/volb/bucketb1/fileb5", result.getLastKey(), "Expected last key to be 'fileb5'"); + } + + @Test + public void testSearchWithEmptyPrevKeyAndStartPrefix() throws IOException { + Response response = omdbInsightEndpoint.getOpenKeyInfo(-1, "", "", true, true); + + assertEquals(200, response.getStatus()); + + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + // Assert all the keys are returned + assertEquals(12, result.getNonFSOKeyInfoList().size(), "Expected all keys to be returned"); + } + + @Test + public void testSearchWithStartPrefixOnly() throws IOException { + String startPrefix = "/volb/bucketb1/"; + Response response = omdbInsightEndpoint.getOpenKeyInfo(10, "", startPrefix, true, true); + + assertEquals(200, response.getStatus()); + + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(5, result.getNonFSOKeyInfoList().size(), "Expected 5 keys starting with 'fileb1'"); + assertEquals("/volb/bucketb1/fileb5", result.getLastKey(), "Expected last key to be 'fileb5'"); + } + + @Test + public void testSearchWithPrevKeyAndStartPrefix() throws IOException { + String startPrefix = "/volb/bucketb1/"; + String prevKey = "/volb/bucketb1/fileb1"; + Response response = omdbInsightEndpoint.getOpenKeyInfo(10, prevKey, startPrefix, true, true); + + assertEquals(200, response.getStatus()); + + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(4, result.getNonFSOKeyInfoList().size(), "Expected 4 keys after 'fileb1'"); + assertEquals("/volb/bucketb1/fileb5", result.getLastKey(), "Expected last key to be 'fileb5'"); + } + /** * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. 
* The test setup mimics the following filesystem structure with specified sizes: @@ -568,7 +610,7 @@ private void populateOMDB() throws Exception { // Create Bucket in volb createBucket("volb", "bucketb1", 1000 + 1000 + 1000 + 1000 + 1000, - getOBSBucketLayout()); + getOBSBucketLayout()); createBucket("volb", "bucketb2", 0, getOBSBucketLayout()); // Empty Bucket // Create Bucket in volc diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java index e30590df55e..7c874a9e299 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.recon.api.NodeEndpoint; import org.apache.hadoop.ozone.recon.api.PipelineEndpoint; import org.apache.hadoop.ozone.recon.api.TaskStatusService; -import org.apache.hadoop.ozone.recon.api.TriggerDBSyncEndpoint; import org.apache.hadoop.ozone.recon.api.UtilizationEndpoint; import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; @@ -70,8 +69,14 @@ public void testAdminOnlyEndpoints() { assertThat(allEndpoints).isNotEmpty(); - // If an endpoint is added, it must be explicitly added to this set or be - // marked with @AdminOnly for this test to pass. + // If an endpoint is added, it must either require admin privileges by being + // marked with the `@AdminOnly` annotation, or be added to this set to exclude it. + // - Any endpoint that displays information related to the filesystem namespace + // (including aggregate counts), user information, or allows modification to the + // cluster's state should be marked as `@AdminOnly`. + // - Read-only endpoints that only return information about node status or + // cluster state do not require the `@AdminOnly` annotation and can be excluded + // from admin requirements by adding them to this set. 
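+ // For illustration only (hypothetical class names, not real Recon endpoints):
+ //   @AdminOnly
+ //   public class ExampleNamespaceSummaryEndpoint { ... }   - exposes namespace aggregates, so it must be admin-only
+ //   nonAdminEndpoints.add(ExampleReadOnlyStatusEndpoint.class);   - read-only status endpoint, excluded via this set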
Set> nonAdminEndpoints = new HashSet<>(); nonAdminEndpoints.add(UtilizationEndpoint.class); nonAdminEndpoints.add(ClusterStateEndpoint.class); @@ -79,7 +84,6 @@ public void testAdminOnlyEndpoints() { nonAdminEndpoints.add(NodeEndpoint.class); nonAdminEndpoints.add(PipelineEndpoint.class); nonAdminEndpoints.add(TaskStatusService.class); - nonAdminEndpoints.add(TriggerDBSyncEndpoint.class); assertThat(allEndpoints).containsAll(nonAdminEndpoints); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 8647639dd13..46e4506a5ef 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -20,14 +20,20 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.assertj.core.api.Assertions.assertThat; -import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_BAD; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.IOException; import java.time.Duration; import java.util.ArrayList; @@ -102,7 +108,7 @@ public void testRun() throws Exception { // Create 7 containers. The first 5 will have various unhealthy states // defined below. 
The container with ID=6 will be healthy and - // container with ID=7 will be EMPTY_MISSING + // container with ID=7 will be EMPTY_MISSING (but not inserted into DB) List mockContainers = getMockContainers(7); when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); when(scmMock.getContainerManager()).thenReturn(containerManagerMock); @@ -129,20 +135,20 @@ public void testRun() throws Exception { when(containerManagerMock.getContainerReplicas(containerInfo2.containerID())) .thenReturn(getMockReplicas(2L, State.UNHEALTHY)); - // return 0 replicas for container ID 3 -> Empty Missing + // return 0 replicas for container ID 3 -> EMPTY_MISSING (will not be inserted into DB) ContainerInfo containerInfo3 = TestContainerInfo.newBuilderForTest().setContainerID(3).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(3L))).thenReturn(containerInfo3); when(containerManagerMock.getContainerReplicas(containerInfo3.containerID())) .thenReturn(Collections.emptySet()); - // Return 5 Healthy -> Over replicated + // Return 5 Healthy Replicas -> Over-replicated ContainerInfo containerInfo4 = TestContainerInfo.newBuilderForTest().setContainerID(4).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(4L))).thenReturn(containerInfo4); when(containerManagerMock.getContainerReplicas(containerInfo4.containerID())) .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, - State.CLOSED, State.CLOSED, State.CLOSED)); + State.CLOSED, State.CLOSED, State.CLOSED)); // Mis-replicated ContainerInfo containerInfo5 = @@ -155,7 +161,7 @@ public void testRun() throws Exception { when(containerManagerMock.getContainerReplicas(containerInfo5.containerID())) .thenReturn(misReplicas); - // Return 3 Healthy -> Healthy container + // Return 3 Healthy Replicas -> Healthy container ContainerInfo containerInfo6 = TestContainerInfo.newBuilderForTest().setContainerID(6).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(6L))).thenReturn(containerInfo6); @@ -163,12 +169,14 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(6L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 7 -> MISSING + // return 0 replicas for container ID 7 -> MISSING (will later transition to EMPTY_MISSING but not inserted into DB) ContainerInfo containerInfo7 = TestContainerInfo.newBuilderForTest().setContainerID(7).setReplicationConfig(replicationConfig).build(); when(containerManagerMock.getContainer(ContainerID.valueOf(7L))).thenReturn(containerInfo7); when(containerManagerMock.getContainerReplicas(containerInfo7.containerID())) .thenReturn(Collections.emptySet()); + when(reconContainerMetadataManager.getKeyCountForContainer( + 7L)).thenReturn(5L); // Indicates non-empty container 7 for now List all = unHealthyContainersTableHandle.findAll(); assertThat(all).isEmpty(); @@ -177,8 +185,8 @@ public void testRun() throws Exception { ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(5)); - when(reconContainerMetadataManager.getKeyCountForContainer( - 7L)).thenReturn(5L); + + // Start container health task ContainerHealthTask containerHealthTask = new ContainerHealthTask(scmMock.getContainerManager(), scmMock.getScmServiceProvider(), @@ -186,8 +194,12 @@ public void testRun() throws Exception 
{ placementMock, reconTaskConfig, reconContainerMetadataManager, new OzoneConfiguration()); containerHealthTask.start(); + + // Ensure unhealthy container count in DB matches expected LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 6)); + (unHealthyContainersTableHandle.count() == 5)); + + // Check for UNDER_REPLICATED container states UnhealthyContainers rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); @@ -197,19 +209,20 @@ public void testRun() throws Exception { assertEquals("UNDER_REPLICATED", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); + // Assert that EMPTY_MISSING state containers were never added to DB. + assertEquals(0, + unHealthyContainersTableHandle.fetchByContainerId(3L).size()); + List unhealthyContainers = containerHealthSchemaManager.getUnhealthyContainers( - ALL_REPLICAS_UNHEALTHY, 0, Integer.MAX_VALUE); + ALL_REPLICAS_BAD, 0, Integer.MAX_VALUE); assertEquals(1, unhealthyContainers.size()); assertEquals(2L, unhealthyContainers.get(0).getContainerId().longValue()); assertEquals(0, unhealthyContainers.get(0).getActualReplicaCount().intValue()); - rec = unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); - assertEquals("EMPTY_MISSING", rec.getContainerState()); - assertEquals(3, rec.getReplicaDelta().intValue()); - + // Check for MISSING state in container ID 7 rec = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); assertEquals("MISSING", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); @@ -230,9 +243,7 @@ public void testRun() throws Exception { assertThat(taskStatus.getLastUpdatedTimestamp()) .isGreaterThan(currentTime); - // Now run the job again, to check that relevant records are updated or - // removed as appropriate. 
Need to adjust the return value for all the mocks - // Under replicated -> Delta goes from 2 to 1 + // Adjust the mock results and rerun to check for updates or removal of records when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) .thenReturn(getMockReplicas(1L, State.CLOSED, State.CLOSED)); @@ -241,7 +252,7 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(2L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 3 -> Still empty Missing + // Container 3 remains EMPTY_MISSING, but no DB insertion when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) .thenReturn(Collections.emptySet()); @@ -250,11 +261,16 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED)); - // Was mis-replicated - make it healthy now + // Convert container 7 which was MISSING to EMPTY_MISSING (not inserted into DB) + when(reconContainerMetadataManager.getKeyCountForContainer( + 7L)).thenReturn(0L); + placementMock.setMisRepWhenDnPresent(null); + // Ensure count is reduced after EMPTY_MISSING containers are not inserted LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 4)); + (unHealthyContainersTableHandle.count() == 2)); + rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); assertEquals(1, rec.getReplicaDelta().intValue()); @@ -263,36 +279,21 @@ public void testRun() throws Exception { assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(2L).size()); - rec = unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); - assertEquals("EMPTY_MISSING", rec.getContainerState()); - assertEquals(3, rec.getReplicaDelta().intValue()); - - rec = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); - assertEquals("MISSING", rec.getContainerState()); - assertEquals(3, rec.getReplicaDelta().intValue()); + // Assert that for container 7 no records exist in DB because it's now EMPTY_MISSING + assertEquals(0, + unHealthyContainersTableHandle.fetchByContainerId(7L).size()); rec = unHealthyContainersTableHandle.fetchByContainerId(4L).get(0); assertEquals("OVER_REPLICATED", rec.getContainerState()); assertEquals(-1, rec.getReplicaDelta().intValue()); - // This container is now healthy, it should not be in the table any more + // Ensure container 5 is now healthy and not in the table assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(5L).size()); - // Again make container Id 7 as empty which was missing as well, so in next - // container health task run, this container also should be deleted from - // UNHEALTHY_CONTAINERS table because we want to cleanup any existing - // EMPTY and MISSING containers from UNHEALTHY_CONTAINERS table. 
- when(reconContainerMetadataManager.getKeyCountForContainer(7L)).thenReturn(0L); - LambdaTestUtils.await(6000, 1000, () -> { - UnhealthyContainers emptyMissingContainer = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); - return ("EMPTY_MISSING".equals(emptyMissingContainer.getContainerState())); - }); - - // Just check once again that count doesn't change, only state of - // container 7 changes from MISSING to EMPTY_MISSING + // Just check once again that count remains consistent LambdaTestUtils.await(60000, 1000, () -> - (unHealthyContainersTableHandle.count() == 4)); + (unHealthyContainersTableHandle.count() == 2)); } @Test @@ -367,17 +368,12 @@ public void testDeletedContainer() throws Exception { reconContainerMetadataManager, new OzoneConfiguration()); containerHealthTask.start(); LambdaTestUtils.await(6000, 1000, () -> - (unHealthyContainersTableHandle.count() == 2)); + (unHealthyContainersTableHandle.count() == 1)); UnhealthyContainers rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("MISSING", rec.getContainerState()); assertEquals(3, rec.getReplicaDelta().intValue()); - rec = - unHealthyContainersTableHandle.fetchByContainerId(3L).get(0); - assertEquals("EMPTY_MISSING", rec.getContainerState()); - assertEquals(3, rec.getReplicaDelta().intValue()); - ReconTaskStatus taskStatus = reconTaskStatusDao.findById(containerHealthTask.getTaskName()); assertThat(taskStatus.getLastUpdatedTimestamp()) @@ -385,64 +381,191 @@ public void testDeletedContainer() throws Exception { } @Test - public void testNegativeSizeContainers() throws Exception { - // Setup mock objects and test environment - UnhealthyContainersDao unhealthyContainersDao = + public void testAllContainerStateInsertions() { + UnhealthyContainersDao unHealthyContainersTableHandle = getDao(UnhealthyContainersDao.class); + ContainerHealthSchemaManager containerHealthSchemaManager = new ContainerHealthSchemaManager( getSchemaDefinition(ContainerSchemaDefinition.class), - unhealthyContainersDao); + unHealthyContainersTableHandle); + + // Iterate through each state in the UnHealthyContainerStates enum + for (ContainerSchemaDefinition.UnHealthyContainerStates state : + ContainerSchemaDefinition.UnHealthyContainerStates.values()) { + + // Create a dummy UnhealthyContainer record with the current state + UnhealthyContainers unhealthyContainer = new UnhealthyContainers(); + unhealthyContainer.setContainerId(state.ordinal() + 1L); + + // Set replica counts based on the state + switch (state) { + case MISSING: + case EMPTY_MISSING: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(0); + unhealthyContainer.setReplicaDelta(3); + break; + + case UNDER_REPLICATED: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(1); + unhealthyContainer.setReplicaDelta(2); + break; + + case OVER_REPLICATED: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(4); + unhealthyContainer.setReplicaDelta(-1); + break; + + case MIS_REPLICATED: + case NEGATIVE_SIZE: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(3); + unhealthyContainer.setReplicaDelta(0); + break; + + case ALL_REPLICAS_BAD: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(0); + unhealthyContainer.setReplicaDelta(3); + break; + + default: + fail("Unhandled state: " + state.name() + ". 
Please add this state to the switch case."); + } + + unhealthyContainer.setContainerState(state.name()); + unhealthyContainer.setInStateSince(System.currentTimeMillis()); + + // Try inserting the record and catch any exception that occurs + Exception exception = null; + try { + containerHealthSchemaManager.insertUnhealthyContainerRecords( + Collections.singletonList(unhealthyContainer)); + } catch (Exception e) { + exception = e; + } + + // Assert no exception should be thrown for each state + assertNull(exception, + "Exception was thrown during insertion for state " + state.name() + + ": " + exception); + + // Optionally, verify the record was inserted correctly + List insertedRecords = + unHealthyContainersTableHandle.fetchByContainerId( + state.ordinal() + 1L); + assertFalse(insertedRecords.isEmpty(), + "Record was not inserted for state " + state.name() + "."); + assertEquals(insertedRecords.get(0).getContainerState(), state.name(), + "The inserted container state does not match for state " + + state.name() + "."); + } + } + + @Test + public void testMissingAndEmptyMissingContainerDeletion() throws Exception { + // Setup mock DAOs and managers + UnhealthyContainersDao unHealthyContainersTableHandle = + getDao(UnhealthyContainersDao.class); + ContainerHealthSchemaManager containerHealthSchemaManager = + new ContainerHealthSchemaManager( + getSchemaDefinition(ContainerSchemaDefinition.class), + unHealthyContainersTableHandle); ReconStorageContainerManagerFacade scmMock = mock(ReconStorageContainerManagerFacade.class); + MockPlacementPolicy placementMock = new MockPlacementPolicy(); ContainerManager containerManagerMock = mock(ContainerManager.class); StorageContainerServiceProvider scmClientMock = mock(StorageContainerServiceProvider.class); ReconContainerMetadataManager reconContainerMetadataManager = mock(ReconContainerMetadataManager.class); - MockPlacementPolicy placementMock = new MockPlacementPolicy(); + mock(ReconContainerMetadataManager.class); - // Mock container info setup - List mockContainers = getMockContainers(3); - when(scmMock.getContainerManager()).thenReturn(containerManagerMock); + // Create 2 containers. They start in CLOSED state in Recon. 
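+ // Both containers will have no replicas, and SCM reports them as DELETED, so the health
+ // task is expected to mark them DELETED in Recon via updateContainerState (verified at
+ // the end of this test) rather than keep reporting them as MISSING / EMPTY_MISSING.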
+ List mockContainers = getMockContainers(2); when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); + when(scmMock.getContainerManager()).thenReturn(containerManagerMock); when(containerManagerMock.getContainers(any(ContainerID.class), anyInt())).thenReturn(mockContainers); + + // Mark both containers as initially CLOSED in Recon for (ContainerInfo c : mockContainers) { - when(containerManagerMock.getContainer( - c.containerID())).thenReturn(c); - when(scmClientMock.getContainerWithPipeline( - c.getContainerID())).thenReturn(new ContainerWithPipeline(c, null)); - when(containerManagerMock.getContainer(c.containerID()) - .getUsedBytes()).thenReturn(Long.valueOf(-10)); + when(containerManagerMock.getContainer(c.containerID())).thenReturn(c); } - // Verify the table is initially empty - assertThat(unhealthyContainersDao.findAll()).isEmpty(); + // Simulate SCM reporting the containers as DELETED + ContainerInfo deletedContainer1 = getMockDeletedContainer(1); + ContainerInfo deletedContainer2 = getMockDeletedContainer(2); + + when(scmClientMock.getContainerWithPipeline(1)) + .thenReturn(new ContainerWithPipeline(deletedContainer1, null)); + when(scmClientMock.getContainerWithPipeline(2)) + .thenReturn(new ContainerWithPipeline(deletedContainer2, null)); + + // Both containers start as CLOSED in Recon (MISSING or EMPTY_MISSING) + when(containerManagerMock.getContainer(ContainerID.valueOf(1L)).getState()) + .thenReturn(HddsProtos.LifeCycleState.CLOSED); + when(containerManagerMock.getContainer(ContainerID.valueOf(2L)).getState()) + .thenReturn(HddsProtos.LifeCycleState.CLOSED); + + // Replicas are empty, so both containers should be considered for deletion + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) + .thenReturn(Collections.emptySet()); + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L))) + .thenReturn(Collections.emptySet()); - // Setup and start the container health task + // Initialize UnhealthyContainers in DB (MISSING and EMPTY_MISSING) + // Create and set up the first UnhealthyContainer for a MISSING container + UnhealthyContainers container1 = new UnhealthyContainers(); + container1.setContainerId(1L); + container1.setContainerState("MISSING"); + container1.setExpectedReplicaCount(3); + container1.setActualReplicaCount(0); + container1.setReplicaDelta(3); + container1.setInStateSince(System.currentTimeMillis()); + + // Create and set up the second UnhealthyContainer for an EMPTY_MISSING container + UnhealthyContainers container2 = new UnhealthyContainers(); + container2.setContainerId(2L); + container2.setContainerState("MISSING"); + container2.setExpectedReplicaCount(3); + container2.setActualReplicaCount(0); + container2.setReplicaDelta(3); + container2.setInStateSince(System.currentTimeMillis()); + + unHealthyContainersTableHandle.insert(container1); + unHealthyContainersTableHandle.insert(container2); + + when(reconContainerMetadataManager.getKeyCountForContainer(1L)).thenReturn(5L); + when(reconContainerMetadataManager.getKeyCountForContainer(2L)).thenReturn(0L); + + // Start the container health task ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); - ContainerHealthTask containerHealthTask = new ContainerHealthTask( - scmMock.getContainerManager(), scmMock.getScmServiceProvider(), - reconTaskStatusDao, - containerHealthSchemaManager, placementMock, reconTaskConfig, - 
reconContainerMetadataManager, - new OzoneConfiguration()); - containerHealthTask.start(); + ContainerHealthTask containerHealthTask = + new ContainerHealthTask(scmMock.getContainerManager(), + scmMock.getScmServiceProvider(), + reconTaskStatusDao, containerHealthSchemaManager, + placementMock, reconTaskConfig, + reconContainerMetadataManager, new OzoneConfiguration()); - // Wait for the task to identify unhealthy containers - LambdaTestUtils.await(6000, 1000, - () -> unhealthyContainersDao.count() == 3); + containerHealthTask.start(); - // Assert that all unhealthy containers have been identified as NEGATIVE_SIZE states - List negativeSizeContainers = - unhealthyContainersDao.fetchByContainerState("NEGATIVE_SIZE"); - assertThat(negativeSizeContainers).hasSize(3); + // Wait for the task to complete and ensure that updateContainerState is invoked for + // container IDs 1 and 2 to mark the containers as DELETED, since they are DELETED in SCM. + LambdaTestUtils.await(60000, 1000, () -> { + verify(containerManagerMock, times(1)) + .updateContainerState(ContainerID.valueOf(1L), HddsProtos.LifeCycleEvent.DELETE); + verify(containerManagerMock, times(1)) + .updateContainerState(ContainerID.valueOf(2L), HddsProtos.LifeCycleEvent.DELETE); + return true; + }); } - private Set getMockReplicas( long containerId, State...states) { Set replicas = new HashSet<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java index 7d55e612bad..4e9965638a1 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java @@ -127,6 +127,58 @@ public void testMissingRecordRetained() { )); } + @Test + public void testEmptyMissingRecordNotInsertedButLogged() { + // Create a container that is in EMPTY_MISSING state + Set replicas = new HashSet<>(); + ContainerHealthStatus status = new ContainerHealthStatus(emptyContainer, replicas, placementPolicy, + reconContainerMetadataManager, CONF); + + // Initialize stats map + Map> unhealthyContainerStateStatsMap = new HashMap<>(); + initializeUnhealthyContainerStateStatsMap(unhealthyContainerStateStatsMap); + + // Generate records for EMPTY_MISSING container + List records = ContainerHealthTask.ContainerHealthRecords.generateUnhealthyRecords( + status, (long) 345678, unhealthyContainerStateStatsMap); + + // Assert that no records are created for EMPTY_MISSING state + assertEquals(0, records.size()); + + // Assert that the EMPTY_MISSING state is logged + assertEquals(1, unhealthyContainerStateStatsMap.get(UnHealthyContainerStates.EMPTY_MISSING) + .getOrDefault(CONTAINER_COUNT, 0L)); + } + + @Test + public void testNegativeSizeRecordNotInsertedButLogged() { + // Simulate a container with NEGATIVE_SIZE state + when(container.getUsedBytes()).thenReturn(-10L); // Negative size + Set replicas = generateReplicas(container, CLOSED, CLOSED); + ContainerHealthStatus status = + new ContainerHealthStatus(container, replicas, placementPolicy, reconContainerMetadataManager, CONF); + + // Initialize stats map + Map> + unhealthyContainerStateStatsMap = new HashMap<>(); + initializeUnhealthyContainerStateStatsMap(unhealthyContainerStateStatsMap); + + // Generate records for NEGATIVE_SIZE container + List records = + 
ContainerHealthTask.ContainerHealthRecords.generateUnhealthyRecords( + status, (long) 123456, unhealthyContainerStateStatsMap); + + // Assert that none of the records are for negative. + records.forEach(record -> assertFalse(record.getContainerState() + .equals(UnHealthyContainerStates.NEGATIVE_SIZE.toString()))); + + + // Assert that the NEGATIVE_SIZE state is logged + assertEquals(1, unhealthyContainerStateStatsMap.get( + UnHealthyContainerStates.NEGATIVE_SIZE).getOrDefault(CONTAINER_COUNT, 0L)); + } + + @Test public void testUnderReplicatedRecordRetainedAndUpdated() { // under replicated container @@ -396,13 +448,9 @@ public void testCorrectRecordsGenerated() { status = new ContainerHealthStatus(emptyContainer, replicas, placementPolicy, reconContainerMetadataManager, CONF); - records = ContainerHealthTask.ContainerHealthRecords + ContainerHealthTask.ContainerHealthRecords .generateUnhealthyRecords(status, (long) 345678, unhealthyContainerStateStatsMap); - assertEquals(1, records.size()); - rec = records.get(0); - assertEquals(UnHealthyContainerStates.EMPTY_MISSING.toString(), - rec.getContainerState()); assertEquals(3, rec.getExpectedReplicaCount().intValue()); assertEquals(0, rec.getActualReplicaCount().intValue()); @@ -582,6 +630,8 @@ private void initializeUnhealthyContainerStateStatsMap( UnHealthyContainerStates.OVER_REPLICATED, new HashMap<>()); unhealthyContainerStateStatsMap.put( UnHealthyContainerStates.MIS_REPLICATED, new HashMap<>()); + unhealthyContainerStateStatsMap.put( + UnHealthyContainerStates.NEGATIVE_SIZE, new HashMap<>()); } private void logUnhealthyContainerStats( @@ -590,7 +640,7 @@ private void logUnhealthyContainerStats( // If any EMPTY_MISSING containers, then it is possible that such // containers got stuck in the closing state which never got // any replicas created on the datanodes. In this case, we log it as - // EMPTY, and insert as EMPTY_MISSING in UNHEALTHY_CONTAINERS table. + // EMPTY_MISSING containers, but dont add it to the unhealthy container table. unhealthyContainerStateStatsMap.entrySet().forEach(stateEntry -> { UnHealthyContainerStates unhealthyContainerState = stateEntry.getKey(); Map containerStateStatsMap = stateEntry.getValue(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java index d007fbb1cf7..efd32693ee9 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java @@ -126,6 +126,10 @@ protected Connection getConnection() throws SQLException { return injector.getInstance(DataSource.class).getConnection(); } + protected DataSource getDataSource() { + return injector.getInstance(DataSource.class); + } + protected DSLContext getDslContext() { return dslContext; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSchemaVersionTableDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSchemaVersionTableDefinition.java new file mode 100644 index 00000000000..fad421a929d --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSchemaVersionTableDefinition.java @@ -0,0 +1,311 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.persistence; + +import static org.hadoop.ozone.recon.codegen.SqlDbUtils.TABLE_EXISTS_CHECK; +import static org.hadoop.ozone.recon.codegen.SqlDbUtils.listAllTables; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME; +import static org.hadoop.ozone.recon.schema.SchemaVersionTableDefinition.SCHEMA_VERSION_TABLE_NAME; +import static org.hadoop.ozone.recon.schema.StatsSchemaDefinition.GLOBAL_STATS_TABLE_NAME; +import static org.jooq.impl.DSL.name; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.recon.ReconContext; +import org.apache.hadoop.ozone.recon.ReconSchemaVersionTableManager; +import org.apache.hadoop.ozone.recon.upgrade.ReconLayoutVersionManager; +import org.hadoop.ozone.recon.schema.SchemaVersionTableDefinition; +import org.jooq.DSLContext; +import org.jooq.Record1; +import org.jooq.impl.DSL; +import org.jooq.impl.SQLDataType; +import org.junit.jupiter.api.Test; + +/** + * Test class for SchemaVersionTableDefinition. 
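+ * Covers schema version table creation, CRUD on the version row, and the fresh-install,
+ * pre-upgraded and already-upgraded cluster initialization scenarios.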
+ */ +public class TestSchemaVersionTableDefinition extends AbstractReconSqlDBTest { + + public TestSchemaVersionTableDefinition() { + super(); + } + + @Test + public void testSchemaVersionTableCreation() throws Exception { + Connection connection = getConnection(); + // Verify table definition + DatabaseMetaData metaData = connection.getMetaData(); + ResultSet resultSet = metaData.getColumns(null, null, + SCHEMA_VERSION_TABLE_NAME, null); + + List> expectedPairs = new ArrayList<>(); + + expectedPairs.add(new ImmutablePair<>("version_number", Types.INTEGER)); + expectedPairs.add(new ImmutablePair<>("applied_on", Types.TIMESTAMP)); + + List> actualPairs = new ArrayList<>(); + + while (resultSet.next()) { + actualPairs.add(new ImmutablePair<>(resultSet.getString("COLUMN_NAME"), + resultSet.getInt("DATA_TYPE"))); + } + + assertEquals(2, actualPairs.size(), "Unexpected number of columns"); + assertEquals(expectedPairs, actualPairs, "Column definitions do not match expected values."); + } + + + @Test + public void testSchemaVersionCRUDOperations() throws SQLException { + Connection connection = getConnection(); + + // Ensure no tables exist initially, simulating a fresh installation + dropAllTables(connection); + + // Create the schema version table + createSchemaVersionTable(connection); + + DSLContext dslContext = DSL.using(connection); + DatabaseMetaData metaData = connection.getMetaData(); + ResultSet resultSet = metaData.getTables(null, null, + SCHEMA_VERSION_TABLE_NAME, null); + + while (resultSet.next()) { + assertEquals(SCHEMA_VERSION_TABLE_NAME, + resultSet.getString("TABLE_NAME")); + } + + // Insert a new version record + dslContext.insertInto(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .columns(DSL.field(name("version_number")), DSL.field(name("applied_on"))) + .values(1, new Timestamp(System.currentTimeMillis())) + .execute(); + + // Read the inserted record + Record1 result = dslContext.select(DSL.field(name("version_number"), Integer.class)) + .from(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .fetchOne(); + + assertEquals(1, result.value1(), "The version number does not match the expected value."); + + // Update the version record + dslContext.update(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .set(DSL.field(name("version_number")), 2) + .execute(); + + // Read the updated record + result = dslContext.select(DSL.field(name("version_number"), Integer.class)) + .from(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .fetchOne(); + + assertEquals(2, result.value1(), "The updated version number does not match the expected value."); + + // Delete the version record + dslContext.deleteFrom(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .execute(); + + // Verify deletion + int count = dslContext.fetchCount(DSL.table(SCHEMA_VERSION_TABLE_NAME)); + assertEquals(0, count, "The table should be empty after deletion."); + } + + /** + * Scenario: + * - A fresh installation of the cluster, where no tables exist initially. + * - All tables, including the schema version table, are created during initialization. + * + * Expected Outcome: + * - The schema version table is created during initialization. + * - The MLV is set to the latest SLV (Software Layout Version), indicating the schema is up-to-date. + * - No upgrade actions are triggered as all tables are already at the latest version. 
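+ * For example, with the latest SLV set to 3 and no pre-existing tables, getCurrentMLV()
+ * is expected to return 3 right after initializeSchema() runs (as exercised below).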
+ */ + @Test + public void testFreshInstallScenario() throws Exception { + Connection connection = getConnection(); + + // Ensure no tables exist initially, simulating a fresh installation + dropAllTables(connection); + + // Initialize the schema + SchemaVersionTableDefinition schemaVersionTable = new SchemaVersionTableDefinition(getDataSource()); + schemaVersionTable.setLatestSLV(3); // Assuming the latest SLV = 3 + schemaVersionTable.initializeSchema(); + + // Verify that the SchemaVersionTable is created + boolean tableExists = TABLE_EXISTS_CHECK.test(connection, SCHEMA_VERSION_TABLE_NAME); + assertEquals(true, tableExists, "The Schema Version Table should be created."); + + // Initialize ReconSchemaVersionTableManager and ReconLayoutVersionManager + ReconSchemaVersionTableManager schemaVersionTableManager = new ReconSchemaVersionTableManager(getDataSource()); + ReconLayoutVersionManager layoutVersionManager = + new ReconLayoutVersionManager(schemaVersionTableManager, mock(ReconContext.class)); + + // Fetch and verify the current MLV + int mlv = layoutVersionManager.getCurrentMLV(); + assertEquals(3, mlv, "For a fresh install, MLV should be set to the latest SLV value."); + } + + /** + * Scenario: + * - The cluster was running without a schema version framework in an older version. + * - After the upgrade, the schema version table is introduced while other tables already exist. + * + * Expected Outcome: + * - The schema version table is created during initialization. + * - The MLV is set to -1, indicating the starting point of the schema version framework. + * - Ensures only necessary upgrades are executed, avoiding redundant updates. + */ + @Test + public void testPreUpgradedClusterScenario() throws Exception { + Connection connection = getConnection(); + + // Simulate the cluster by creating other tables but not the schema version table + dropTable(connection, SCHEMA_VERSION_TABLE_NAME); + if (listAllTables(connection).isEmpty()) { + createTable(connection, GLOBAL_STATS_TABLE_NAME); + createTable(connection, UNHEALTHY_CONTAINERS_TABLE_NAME); + } + + // Initialize the schema + SchemaVersionTableDefinition schemaVersionTable = new SchemaVersionTableDefinition(getDataSource()); + schemaVersionTable.initializeSchema(); + + // Verify SchemaVersionTable is created + boolean tableExists = TABLE_EXISTS_CHECK.test(connection, SCHEMA_VERSION_TABLE_NAME); + assertEquals(true, tableExists, "The Schema Version Table should be created."); + + // Initialize ReconSchemaVersionTableManager and ReconLayoutVersionManager + ReconSchemaVersionTableManager schemaVersionTableManager = new ReconSchemaVersionTableManager(getDataSource()); + ReconLayoutVersionManager layoutVersionManager = + new ReconLayoutVersionManager(schemaVersionTableManager, mock(ReconContext.class)); + + // Fetch and verify the current MLV + int mlv = layoutVersionManager.getCurrentMLV(); + assertEquals(-1, mlv, "For a pre-upgraded cluster, MLV should be set to -1."); + } + + /*** + * Scenario: + * - This simulates a cluster where the schema version table already exists, + * indicating the schema version framework is in place. + * - The schema version table contains a previously finalized Metadata Layout Version (MLV). + * + * Expected Outcome: + * - The MLV stored in the schema version table (2) is correctly read by the ReconLayoutVersionManager. + * - The MLV is retained and not overridden by the SLV value (3) during schema initialization. 
+ * - This ensures no unnecessary upgrades are triggered and the existing MLV remains consistent. + */ + @Test + public void testUpgradedClusterScenario() throws Exception { + Connection connection = getConnection(); + + // Simulate a cluster with an existing schema version framework + dropAllTables(connection); // Ensure no previous data exists + if (listAllTables(connection).isEmpty()) { + // Create necessary tables to simulate the cluster state + createTable(connection, GLOBAL_STATS_TABLE_NAME); + createTable(connection, UNHEALTHY_CONTAINERS_TABLE_NAME); + // Create the schema version table + createSchemaVersionTable(connection); + } + + // Insert a single existing MLV (e.g., version 2) into the Schema Version Table + DSLContext dslContext = DSL.using(connection); + dslContext.insertInto(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .columns(DSL.field(name("version_number")), + DSL.field(name("applied_on"))) + .values(2, new Timestamp(System.currentTimeMillis())) + .execute(); + + // Initialize the schema + SchemaVersionTableDefinition schemaVersionTable = new SchemaVersionTableDefinition(getDataSource()); + schemaVersionTable.setLatestSLV(3); // Assuming the latest SLV = 3 + schemaVersionTable.initializeSchema(); + + // Initialize managers to interact with schema version framework + ReconSchemaVersionTableManager schemaVersionTableManager = new ReconSchemaVersionTableManager(getDataSource()); + ReconLayoutVersionManager layoutVersionManager = + new ReconLayoutVersionManager(schemaVersionTableManager, mock(ReconContext.class)); + + // Fetch and verify the current MLV stored in the database + int mlv = layoutVersionManager.getCurrentMLV(); + + // Assert that the MLV stored in the DB is retained and not overridden by the SLV value + // when running initializeSchema() before upgrade takes place + assertEquals(2, mlv, "For a cluster with an existing schema version framework, " + + "the MLV should match the value stored in the DB."); + } + + /** + * Utility method to create the schema version table. + */ + private void createSchemaVersionTable(Connection connection) throws SQLException { + DSLContext dslContext = DSL.using(connection); + dslContext.createTableIfNotExists(SCHEMA_VERSION_TABLE_NAME) + .column("version_number", SQLDataType.INTEGER.nullable(false)) + .column("applied_on", SQLDataType.TIMESTAMP.defaultValue(DSL.currentTimestamp())) + .execute(); + } + + /** + * Utility method to create a mock table. + */ + private void createTable(Connection connection, String tableName) throws SQLException { + DSLContext dslContext = DSL.using(connection); + dslContext.createTableIfNotExists(tableName) + .column("id", SQLDataType.INTEGER.nullable(false)) + .column("data", SQLDataType.VARCHAR(255)) + .execute(); + } + + /** + * Utility method to drop all tables (simulating a fresh environment). + */ + private void dropAllTables(Connection connection) throws SQLException { + DSLContext dslContext = DSL.using(connection); + List tableNames = listAllTables(connection); + if (tableNames.isEmpty()) { + return; + } + for (String tableName : tableNames) { + dslContext.dropTableIfExists(tableName).execute(); + } + } + + /** + * Utility method to drop one table. 
+ */ + private void dropTable(Connection connection, String tableName) throws SQLException { + DSLContext dslContext = DSL.using(connection); + dslContext.dropTableIfExists(tableName).execute(); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java index eb62b7d3ece..939279fc17b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java @@ -88,7 +88,7 @@ public void setUp(@TempDir File tempDir) throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_DIRS, tempDir.getAbsolutePath()); conf.set(OZONE_SCM_NAMES, "localhost"); - store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, ReconSCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance( true, new SCMHADBTransactionBufferStub(store)); sequenceIdGen = new SequenceIdGenerator( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java index 02207f9c620..f17eb78d89c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java @@ -86,7 +86,7 @@ public void setUp() throws Exception { reconStorageConfig = new ReconStorageConfig(conf, reconUtils); versionManager = new HDDSLayoutVersionManager( reconStorageConfig.getLayoutVersion()); - store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, ReconSCMDBDefinition.get()); reconContext = new ReconContext(conf, reconUtils); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java index d723ee75e85..302772e40fd 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java @@ -94,7 +94,7 @@ public void setup() throws IOException { temporaryFolder.toAbsolutePath().toString()); conf.set(OZONE_SCM_NAMES, "localhost"); scmStorageConfig = new ReconStorageConfig(conf, new ReconUtils()); - store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, ReconSCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance( true, new SCMHADBTransactionBufferStub(store)); scmContext = SCMContext.emptyContext(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java index 3831f03bfd8..f4f0bfe9acd 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java @@ -68,7 +68,7 @@ public class TestOMDBUpdatesHandler { private OMMetadataManager omMetadataManager; private OMMetadataManager 
reconOmMetadataManager; - private OMDBDefinition omdbDefinition = new OMDBDefinition(); + private final OMDBDefinition omdbDefinition = OMDBDefinition.get(); private Random random = new Random(); private OzoneConfiguration createNewTestPath(String folderName) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java index 0adb44e87ca..7da98acb38f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java @@ -53,7 +53,7 @@ public class TestOmUpdateEventValidator { private OmUpdateEventValidator eventValidator; - private OMDBDefinition omdbDefinition; + private final OMDBDefinition omdbDefinition = OMDBDefinition.get(); private OMMetadataManager omMetadataManager; private Logger logger; @TempDir @@ -63,11 +63,10 @@ public class TestOmUpdateEventValidator { public void setUp() throws IOException { omMetadataManager = initializeNewOmMetadataManager( temporaryFolder.toFile()); - omdbDefinition = new OMDBDefinition(); eventValidator = new OmUpdateEventValidator(omdbDefinition); // Create a mock logger logger = mock(Logger.class); - eventValidator.setLogger(logger); + OmUpdateEventValidator.setLogger(logger); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java new file mode 100644 index 00000000000..b2399f42362 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; +import org.jooq.DSLContext; +import org.jooq.impl.DSL; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; + +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME; +import static org.jooq.impl.DSL.field; +import static org.jooq.impl.DSL.name; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test class for InitialConstraintUpgradeAction. + */ +public class TestInitialConstraintUpgradeAction extends AbstractReconSqlDBTest { + + private InitialConstraintUpgradeAction upgradeAction; + private DSLContext dslContext; + private ReconStorageContainerManagerFacade mockScmFacade; + + @BeforeEach + public void setUp() throws SQLException { + // Initialize the DSLContext + dslContext = getDslContext(); + + // Initialize the upgrade action + upgradeAction = new InitialConstraintUpgradeAction(); + + // Mock the SCM facade to provide the DataSource + mockScmFacade = mock(ReconStorageContainerManagerFacade.class); + DataSource dataSource = getInjector().getInstance(DataSource.class); + when(mockScmFacade.getDataSource()).thenReturn(dataSource); + + // Set the DataSource and DSLContext directly + upgradeAction.setDataSource(dataSource); + upgradeAction.setDslContext(dslContext); + + // Check if the table already exists + try (Connection conn = dataSource.getConnection()) { + DatabaseMetaData dbMetaData = conn.getMetaData(); + ResultSet tables = dbMetaData.getTables(null, null, UNHEALTHY_CONTAINERS_TABLE_NAME, null); + if (!tables.next()) { + // Create the initial table if it does not exist + dslContext.createTable(UNHEALTHY_CONTAINERS_TABLE_NAME) + .column("container_id", org.jooq.impl.SQLDataType.BIGINT + .nullable(false)) + .column("container_state", org.jooq.impl.SQLDataType.VARCHAR(16) + .nullable(false)) + .constraint(DSL.constraint("pk_container_id") + .primaryKey("container_id", "container_state")) + .execute(); + } + } + } + + @Test + public void testUpgradeAppliesConstraintModificationForAllStates() throws SQLException { + // Run the upgrade action + upgradeAction.execute(mockScmFacade); + + // Iterate over all valid states and insert records + for (ContainerSchemaDefinition.UnHealthyContainerStates state : + ContainerSchemaDefinition.UnHealthyContainerStates.values()) { + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values( + System.currentTimeMillis(), // Unique container_id for each record + state.name(), System.currentTimeMillis(), 3, 2, 1, "Replica count mismatch" + ) + .execute(); + } + + // Verify that the number of inserted records matches the number of enum values + int count = dslContext.fetchCount(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)); + 
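+ // One record was inserted above for every UnHealthyContainerStates value, so the row count should equal the enum length.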
assertEquals(ContainerSchemaDefinition.UnHealthyContainerStates.values().length, + count, "Expected one record for each valid state"); + + // Try inserting an invalid state (should fail due to constraint) + assertThrows(org.jooq.exception.DataAccessException.class, () -> + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values(999L, "INVALID_STATE", System.currentTimeMillis(), 3, 2, 1, + "Invalid state test").execute(), + "Inserting an invalid container_state should fail due to the constraint"); + } + + @Test + public void testInsertionWithNullContainerState() { + assertThrows(org.jooq.exception.DataAccessException.class, () -> { + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values( + 100L, // container_id + null, // container_state is NULL + System.currentTimeMillis(), 3, 2, 1, "Testing NULL state" + ) + .execute(); + }, "Inserting a NULL container_state should fail due to the NOT NULL constraint"); + } + + @Test + public void testDuplicatePrimaryKeyInsertion() throws SQLException { + // Insert the first record + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values(200L, "MISSING", System.currentTimeMillis(), 3, 2, 1, "First insertion" + ) + .execute(); + + // Try inserting a duplicate record with the same primary key + assertThrows(org.jooq.exception.DataAccessException.class, () -> { + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values(200L, "MISSING", System.currentTimeMillis(), 3, 2, 1, "Duplicate insertion" + ) + .execute(); + }, "Inserting a duplicate primary key should fail due to the primary key constraint"); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReconLayoutVersionManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReconLayoutVersionManager.java new file mode 100644 index 00000000000..a22c737691d --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReconLayoutVersionManager.java @@ -0,0 +1,365 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.ReconContext; +import org.apache.hadoop.ozone.recon.ReconSchemaVersionTableManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.mockito.InOrder; +import org.mockito.MockedStatic; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.AfterEach; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.anyInt; + + +/** + * Tests for ReconLayoutVersionManager. + */ +public class TestReconLayoutVersionManager { + + private ReconSchemaVersionTableManager schemaVersionTableManager; + private ReconLayoutVersionManager layoutVersionManager; + private MockedStatic mockedEnum; + private MockedStatic mockedEnumUpgradeActionType; + private ReconStorageContainerManagerFacade scmFacadeMock; + private DataSource mockDataSource; + private Connection mockConnection; + + @BeforeEach + public void setUp() throws SQLException { + schemaVersionTableManager = mock(ReconSchemaVersionTableManager.class); + when(schemaVersionTableManager.getCurrentSchemaVersion()).thenReturn(0); + + // Mocking ReconLayoutFeature.values() to return custom enum instances + mockedEnum = mockStatic(ReconLayoutFeature.class); + mockedEnumUpgradeActionType = mockStatic(ReconUpgradeAction.UpgradeActionType.class); + + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + ReconLayoutFeature feature2 = mock(ReconLayoutFeature.class); + when(feature2.getVersion()).thenReturn(2); + ReconUpgradeAction action2 = mock(ReconUpgradeAction.class); + when(feature2.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action2)); + + // Define the custom features to be returned + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1, feature2}); + + layoutVersionManager = new ReconLayoutVersionManager(schemaVersionTableManager, mock(ReconContext.class)); + + // Common mocks for all tests + scmFacadeMock = mock(ReconStorageContainerManagerFacade.class); + mockDataSource = mock(DataSource.class); + mockConnection = mock(Connection.class); + + 
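+ // Wire scmFacadeMock -> mockDataSource -> mockConnection so that finalizeLayoutFeatures
+ // can obtain a connection for the schema-version update transaction in the tests below.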
when(scmFacadeMock.getDataSource()).thenReturn(mockDataSource); + when(mockDataSource.getConnection()).thenReturn(mockConnection); + + doNothing().when(mockConnection).setAutoCommit(false); + doNothing().when(mockConnection).commit(); + doNothing().when(mockConnection).rollback(); + } + + @AfterEach + public void tearDown() { + // Close the static mock after each test to deregister it + mockedEnum.close(); + if (mockedEnumUpgradeActionType != null) { + mockedEnumUpgradeActionType.close(); + } + } + + /** + * Tests the initialization of layout version manager to ensure + * that the MLV (Metadata Layout Version) is set correctly to 0, + * and SLV (Software Layout Version) reflects the maximum available version. + */ + @Test + public void testInitializationWithMockedValues() { + assertEquals(0, layoutVersionManager.getCurrentMLV()); + assertEquals(2, layoutVersionManager.getCurrentSLV()); + } + + /** + * Tests the finalization of layout features and ensures that the updateSchemaVersion for + * the schemaVersionTable is triggered for each feature version. + */ + @Test + public void testFinalizeLayoutFeaturesWithMockedValues() throws SQLException { + // Execute the method under test + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that schema versions are updated for our custom features + verify(schemaVersionTableManager, times(1)) + .updateSchemaVersion(1, mockConnection); + verify(schemaVersionTableManager, times(1)) + .updateSchemaVersion(2, mockConnection); + } + + /** + * Tests the retrieval of registered features to ensure that the correct + * layout features are returned according to the mocked values. + */ + @Test + public void testGetRegisteredFeaturesWithMockedValues() { + // Fetch the registered features + List registeredFeatures = layoutVersionManager.getRegisteredFeatures(); + + // Verify that the registered features match the mocked ones + ReconLayoutFeature feature1 = ReconLayoutFeature.values()[0]; + ReconLayoutFeature feature2 = ReconLayoutFeature.values()[1]; + List expectedFeatures = Arrays.asList(feature1, feature2); + assertEquals(expectedFeatures, registeredFeatures); + } + + /** + * Tests the scenario where no layout features are present. Ensures that no schema + * version updates are attempted when there are no features to finalize. + */ + @Test + public void testNoLayoutFeatures() throws SQLException { + // Ensure no layout features are present + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{}); + + // Execute the method under test + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that no schema version updates were attempted + verify(schemaVersionTableManager, never()).updateSchemaVersion(anyInt(), any(Connection.class)); + } + + /** + * Tests the scenario where an upgrade action fails. Ensures that if an upgrade action + * throws an exception, the schema version is not updated. 
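+ * The FINALIZE action of the mocked feature is stubbed to throw, so the connection is expected
+ * to be rolled back and the MLV to remain at 0.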
+ */ + @Test + public void testUpgradeActionFailure() throws Exception { + // Reset existing mocks and set up new features for this specific test + mockedEnum.reset(); + + // Mock ReconLayoutFeature instances + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + + // Simulate an exception being thrown during the upgrade action execution + doThrow(new RuntimeException("Upgrade failed")).when(action1).execute(scmFacadeMock); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + // Mock the static values method to return the custom feature + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1}); + + // Execute the layout feature finalization + try { + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + } catch (Exception e) { + // Exception is expected, so it's fine to catch and ignore it here + } + + // Verify that metadata layout version MLV was not updated as the transaction was rolled back + assertEquals(0, layoutVersionManager.getCurrentMLV()); + + // Verify that a rollback was triggered + verify(mockConnection, times(1)).rollback(); + } + + /** + * Tests the scenario where the schema version update fails. Ensures that if the schema + * version update fails, the transaction is rolled back and the metadata layout version + * is not updated. + */ + @Test + public void testUpdateSchemaFailure() throws Exception { + // Reset existing mocks and set up new features for this specific test + mockedEnum.reset(); + + // Mock ReconLayoutFeature instances + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + + // Simulate an exception being thrown during the schema version update + doThrow(new RuntimeException("Schema update failed")).when(schemaVersionTableManager). + updateSchemaVersion(1, mockConnection); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + // Mock the static values method to return the custom feature + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1}); + + // Execute the layout feature finalization + try { + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + } catch (Exception e) { + // Exception is expected, so it's fine to catch and ignore it here + } + + // Verify that metadata layout version MLV was not updated as the transaction was rolled back + assertEquals(0, layoutVersionManager.getCurrentMLV()); + + // Verify that the upgrade action was not committed and a rollback was triggered + verify(mockConnection, times(1)).rollback(); + } + + /** + * Tests the order of execution for the upgrade actions to ensure that + * they are executed sequentially according to their version numbers. 
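+ * The mocked features are registered out of order (versions 2, 3, 1), so the manager is expected
+ * to sort them and run the version 1 action first, then version 2, then version 3.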
+ */ + @Test + public void testUpgradeActionExecutionOrder() throws Exception { + // Reset the existing static mock for this specific test + mockedEnum.reset(); + + // Mock ReconLayoutFeature instances + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + ReconLayoutFeature feature2 = mock(ReconLayoutFeature.class); + when(feature2.getVersion()).thenReturn(2); + ReconUpgradeAction action2 = mock(ReconUpgradeAction.class); + when(feature2.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action2)); + + ReconLayoutFeature feature3 = mock(ReconLayoutFeature.class); + when(feature3.getVersion()).thenReturn(3); + ReconUpgradeAction action3 = mock(ReconUpgradeAction.class); + when(feature3.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action3)); + + // Mock the static values method to return custom features in a jumbled order + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature2, feature3, feature1}); + + // Execute the layout feature finalization + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that the actions were executed in the correct order using InOrder + InOrder inOrder = inOrder(action1, action2, action3); + inOrder.verify(action1).execute(scmFacadeMock); // Should be executed first + inOrder.verify(action2).execute(scmFacadeMock); // Should be executed second + inOrder.verify(action3).execute(scmFacadeMock); // Should be executed third + } + + /** + * Tests the scenario where no upgrade actions are needed. Ensures that if the current + * schema version matches the maximum layout version, no upgrade actions are executed. + */ + @Test + public void testNoUpgradeActionsNeeded() throws SQLException { + // Mock the current schema version to the maximum layout version + when(schemaVersionTableManager.getCurrentSchemaVersion()).thenReturn(0); + + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{}); + + // Execute the method under test + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that no schema version updates were attempted + verify(schemaVersionTableManager, never()).updateSchemaVersion(anyInt(), eq(mockConnection)); + } + + /** + * Tests the scenario where the first two features are finalized, + * and then a third feature is introduced. Ensures that only the + * newly introduced feature is finalized while the previously + * finalized features are skipped. 
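+ * For example: once features 1 and 2 are finalized the recorded schema version is 2; when feature 3
+ * is introduced, only its action should execute and only updateSchemaVersion(3, connection) should
+ * be recorded for the second finalization round.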
+ */ + @Test + public void testFinalizingNewFeatureWithoutReFinalizingPreviousFeatures() throws Exception { + // Step 1: Mock the schema version manager + when(schemaVersionTableManager.getCurrentSchemaVersion()).thenReturn(0); + + // Mock the first two features + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + ReconLayoutFeature feature2 = mock(ReconLayoutFeature.class); + when(feature2.getVersion()).thenReturn(2); + ReconUpgradeAction action2 = mock(ReconUpgradeAction.class); + when(feature2.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action2)); + + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1, feature2}); + + // Finalize the first two features. + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that the schema versions for the first two features were updated + verify(schemaVersionTableManager, times(1)).updateSchemaVersion(1, mockConnection); + verify(schemaVersionTableManager, times(1)).updateSchemaVersion(2, mockConnection); + + // Step 2: Introduce a new feature (Feature 3) + ReconLayoutFeature feature3 = mock(ReconLayoutFeature.class); + when(feature3.getVersion()).thenReturn(3); + ReconUpgradeAction action3 = mock(ReconUpgradeAction.class); + when(feature3.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action3)); + + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1, feature2, feature3}); + + // Update schema version to simulate that features 1 and 2 have already been finalized. + when(schemaVersionTableManager.getCurrentSchemaVersion()).thenReturn(2); + + // Finalize again, but only feature 3 should be finalized. + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that the schema version for feature 3 was updated + verify(schemaVersionTableManager, times(1)).updateSchemaVersion(3, mockConnection); + + // Verify that action1 and action2 were not executed again. + verify(action1, times(1)).execute(scmFacadeMock); + verify(action2, times(1)).execute(scmFacadeMock); + + // Verify that the upgrade action for feature 3 was executed. 
+ verify(action3, times(1)).execute(scmFacadeMock); + } + +} diff --git a/hadoop-ozone/s3-secret-store/pom.xml b/hadoop-ozone/s3-secret-store/pom.xml index b2da4c9e3c8..210969e766a 100644 --- a/hadoop-ozone/s3-secret-store/pom.xml +++ b/hadoop-ozone/s3-secret-store/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-s3-secret-store Apache Ozone S3 Secret Store jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index c26171d98ac..f012d3f1aab 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -19,13 +19,14 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-s3gateway Apache Ozone S3 Gateway jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT + false UTF-8 true diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java index 20c2f4c6275..2975d0f39fa 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java @@ -48,7 +48,11 @@ public enum S3GAction implements AuditAction { DELETE_KEY, CREATE_DIRECTORY, GENERATE_SECRET, - REVOKE_SECRET; + REVOKE_SECRET, + GET_OBJECT_TAGGING, + PUT_OBJECT_TAGGING, + DELETE_OBJECT_TAGGING, + PUT_OBJECT_ACL; @Override public String getAction() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index d49ff17f3bf..cc63663bf22 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -19,11 +19,9 @@ import javax.annotation.Priority; import javax.inject.Inject; -import javax.ws.rs.WebApplicationException; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.container.ContainerRequestFilter; import javax.ws.rs.container.PreMatching; -import javax.ws.rs.core.Response; import javax.ws.rs.ext.Provider; import com.google.common.annotations.VisibleForTesting; @@ -41,6 +39,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ACCESS_DENIED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INTERNAL_ERROR; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_AUTHINFO_CREATION_ERROR; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Filter used to construct string to sign from unfiltered request. 
@@ -116,10 +115,4 @@ public SignatureInfo getSignatureInfo() { return signatureInfo; } - private WebApplicationException wrapOS3Exception(OS3Exception os3Exception) { - return new WebApplicationException(os3Exception.getErrorMessage(), - os3Exception, - Response.status(os3Exception.getHttpCode()) - .entity(os3Exception.toXml()).build()); - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java index 86d25d19417..9816b023dc4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java @@ -18,7 +18,9 @@ package org.apache.hadoop.ozone.s3; import java.io.IOException; +import java.net.InetSocketAddress; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -57,7 +59,6 @@ public class Gateway extends GenericCli { private S3GatewayHttpServer httpServer; private S3GatewayMetrics metrics; - private OzoneConfiguration ozoneConfiguration; private final JvmPauseMonitor jvmPauseMonitor = newJvmPauseMonitor("S3G"); @@ -71,14 +72,14 @@ public static void main(String[] args) throws Exception { @Override public Void call() throws Exception { - ozoneConfiguration = createOzoneConfiguration(); - TracingUtil.initTracing("S3gateway", ozoneConfiguration); + OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); OzoneConfigurationHolder.setConfiguration(ozoneConfiguration); - UserGroupInformation.setConfiguration(ozoneConfiguration); - loginS3GUser(ozoneConfiguration); - setHttpBaseDir(ozoneConfiguration); - httpServer = new S3GatewayHttpServer(ozoneConfiguration, "s3gateway"); - metrics = S3GatewayMetrics.create(ozoneConfiguration); + TracingUtil.initTracing("S3gateway", OzoneConfigurationHolder.configuration()); + UserGroupInformation.setConfiguration(OzoneConfigurationHolder.configuration()); + loginS3GUser(OzoneConfigurationHolder.configuration()); + setHttpBaseDir(OzoneConfigurationHolder.configuration()); + httpServer = new S3GatewayHttpServer(OzoneConfigurationHolder.configuration(), "s3gateway"); + metrics = S3GatewayMetrics.create(OzoneConfigurationHolder.configuration()); start(); ShutdownHookManager.get().addShutdownHook(() -> { @@ -95,10 +96,10 @@ public void start() throws IOException { String[] originalArgs = getCmd().getParseResult().originalArgs() .toArray(new String[0]); HddsServerUtil.startupShutdownMessage(OzoneVersionInfo.OZONE_VERSION_INFO, - Gateway.class, originalArgs, LOG, ozoneConfiguration); + Gateway.class, originalArgs, LOG, OzoneConfigurationHolder.configuration()); LOG.info("Starting Ozone S3 gateway"); - HddsServerUtil.initializeMetrics(ozoneConfiguration, "S3Gateway"); + HddsServerUtil.initializeMetrics(OzoneConfigurationHolder.configuration(), "S3Gateway"); jvmPauseMonitor.start(); httpServer.start(); } @@ -133,4 +134,14 @@ private static void loginS3GUser(OzoneConfiguration conf) } } + @VisibleForTesting + public InetSocketAddress getHttpAddress() { + return this.httpServer.getHttpAddress(); + } + + @VisibleForTesting + public InetSocketAddress getHttpsAddress() { + return this.httpServer.getHttpsAddress(); + } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java index 4f08527668c..7614c4933a8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java @@ -154,8 +154,6 @@ private void setCertificate(String omServiceID, } } catch (CertificateException ce) { throw new IOException(ce); - } catch (IOException e) { - throw e; } finally { if (certClient != null) { certClient.close(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java index 4aeab1f3c4a..9d6f7a82252 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java @@ -19,6 +19,7 @@ import javax.enterprise.inject.Produces; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.OzoneConfiguration; /** @@ -27,17 +28,30 @@ * As the OzoneConfiguration is created by the CLI application here we inject * it via a singleton instance to the Jax-RS/CDI instances. */ -public class OzoneConfigurationHolder { +public final class OzoneConfigurationHolder { private static OzoneConfiguration configuration; + private OzoneConfigurationHolder() { + } + @Produces - public OzoneConfiguration configuration() { + public static OzoneConfiguration configuration() { return configuration; } + @VisibleForTesting public static void setConfiguration( OzoneConfiguration conf) { - OzoneConfigurationHolder.configuration = conf; + // Nullity check is used in case the configuration was already set + // in the MiniOzoneCluster + if (configuration == null) { + OzoneConfigurationHolder.configuration = conf; + } + } + + @VisibleForTesting + public static void resetConfiguration() { + configuration = null; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java index a058e413b96..9160025a016 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java @@ -56,7 +56,7 @@ public final class S3GatewayConfigKeys { public static final String OZONE_S3G_CLIENT_BUFFER_SIZE_KEY = "ozone.s3g.client.buffer.size"; public static final String OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT = - "4KB"; + "4MB"; // S3G kerberos, principal config public static final String OZONE_S3G_KERBEROS_KEYTAB_FILE_KEY = diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java index 97117a30bbd..8b6af74e072 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java @@ -114,7 +114,7 @@ protected String getHttpAddressKey() { @Override protected String getHttpBindHostKey() { - return OZONE_S3G_HTTP_BIND_HOST_KEY; + return S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_KEY; } @Override @@ -144,12 +144,12 @@ protected int getHttpsBindPortDefault() { @Override protected String getKeytabFile() { - 
return OZONE_S3G_KEYTAB_FILE; + return S3GatewayConfigKeys.OZONE_S3G_KEYTAB_FILE; } @Override protected String getSpnegoPrincipal() { - return OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; + return S3GatewayConfigKeys.OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; } @Override diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 1b845c79aeb..e68a59e7f76 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -69,6 +69,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.ETAG; @@ -221,6 +222,9 @@ public Response get( // example prefix: dir1/ key: dir123 continue; } + if (startAfter != null && count == 0 && Objects.equals(startAfter, next.getName())) { + continue; + } String relativeKeyName = next.getName().substring(prefix.length()); int depth = StringUtils.countMatches(relativeKeyName, delimiter); @@ -564,8 +568,7 @@ public Response putAcl(String bucketName, HttpHeaders httpHeaders, if (grantReads == null && grantWrites == null && grantReadACP == null && grantWriteACP == null && grantFull == null) { S3BucketAcl putBucketAclRequest = - new PutBucketAclRequestUnmarshaller().readFrom( - null, null, null, null, null, body); + new PutBucketAclRequestUnmarshaller().readFrom(body); // Handle grants in body ozoneAclListOnBucket.addAll( S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest)); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java index cdaaa228ecd..3ab9a123cc7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java @@ -17,49 +17,27 @@ */ package org.apache.hadoop.ozone.s3.endpoint; -import org.xml.sax.InputSource; -import org.xml.sax.XMLReader; - import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyReader; -import javax.xml.XMLConstants; -import javax.xml.bind.JAXBContext; -import javax.xml.bind.UnmarshallerHandler; -import javax.xml.parsers.SAXParserFactory; import java.io.IOException; import java.io.InputStream; import java.lang.annotation.Annotation; import java.lang.reflect.Type; import javax.ws.rs.ext.Provider; -import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Custom unmarshaller to read CompleteMultipartUploadRequest wo namespace. 
*/ @Provider public class CompleteMultipartUploadRequestUnmarshaller - implements MessageBodyReader { - - private final JAXBContext context; - private final SAXParserFactory saxParserFactory; + extends MessageUnmarshaller { public CompleteMultipartUploadRequestUnmarshaller() { - try { - context = JAXBContext.newInstance(CompleteMultipartUploadRequest.class); - saxParserFactory = SAXParserFactory.newInstance(); - saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - } catch (Exception ex) { - throw new AssertionError("Can not instantiate " + - "CompleteMultipartUploadRequest parser", ex); - } - } - @Override - public boolean isReadable(Class aClass, Type type, - Annotation[] annotations, MediaType mediaType) { - return type.equals(CompleteMultipartUploadRequest.class); + super(CompleteMultipartUploadRequest.class); } @Override @@ -67,19 +45,15 @@ public CompleteMultipartUploadRequest readFrom( Class aClass, Type type, Annotation[] annotations, MediaType mediaType, MultivaluedMap multivaluedMap, - InputStream inputStream) throws IOException, WebApplicationException { + InputStream inputStream) throws WebApplicationException { try { - XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); - UnmarshallerHandler unmarshallerHandler = - context.createUnmarshaller().getUnmarshallerHandler(); - XmlNamespaceFilter filter = - new XmlNamespaceFilter(S3_XML_NAMESPACE); - filter.setContentHandler(unmarshallerHandler); - filter.setParent(xmlReader); - filter.parse(new InputSource(inputStream)); - return (CompleteMultipartUploadRequest) unmarshallerHandler.getResult(); - } catch (Exception e) { - throw new WebApplicationException("Can't parse request body to XML.", e); + if (inputStream.available() == 0) { + throw wrapOS3Exception(INVALID_REQUEST.withMessage("You must specify at least one part")); + } + return super.readFrom(aClass, type, annotations, mediaType, multivaluedMap, inputStream); + } catch (IOException e) { + throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); } } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 4ffc3011935..fbb0614c4f4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -72,6 +72,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.KB; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_TAG; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; +import static org.apache.hadoop.ozone.s3.util.S3Consts.AWS_TAG_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT; @@ -363,59 +364,70 @@ protected Map getTaggingFromHeaders(HttpHeaders httpHeaders) List tagPairs = URLEncodedUtils.parse(tagString, UTF_8); - if (tagPairs.isEmpty()) { - return Collections.emptyMap(); - } + return validateAndGetTagging(tagPairs, NameValuePair::getName, NameValuePair::getValue); + } - Map tags = new HashMap<>(); - // Tag restrictions: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_S3Tag.html - for (NameValuePair tagPair: tagPairs) { - if (StringUtils.isEmpty(tagPair.getName())) { - OS3Exception ex = 
newError(INVALID_TAG, TAG_HEADER); - ex.setErrorMessage("Some tag keys are empty, please specify the non-empty tag keys"); + protected static Map validateAndGetTagging( + List tagList, + Function getTagKey, + Function getTagValue + ) throws OS3Exception { + final Map tags = new HashMap<>(); + for (KV tagPair : tagList) { + final String tagKey = getTagKey.apply(tagPair); + final String tagValue = getTagValue.apply(tagPair); + // Tag restrictions: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_S3Tag.html + if (StringUtils.isEmpty(tagKey)) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, TAG_HEADER); + ex.setErrorMessage("Some tag keys are empty, please only specify non-empty tag keys"); throw ex; } - if (tagPair.getValue() == null) { - // For example for query parameter with only value (e.g. "tag1") - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); - ex.setErrorMessage("Some tag values are not specified, please specify the tag values"); + if (StringUtils.startsWith(tagKey, AWS_TAG_PREFIX)) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("Tag key cannot start with \"aws:\" prefix"); throw ex; } - if (tags.containsKey(tagPair.getName())) { - // Tags that are associated with an object must have unique tag keys - // Reject request if the same key is used twice on the same resource - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); - ex.setErrorMessage("There are tags with duplicate tag keys, tag keys should be unique"); + if (tagValue == null) { + // For example for query parameter with only value (e.g. "tag1") + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("Some tag values are not specified, please specify the tag values"); throw ex; } - if (tagPair.getName().length() > TAG_KEY_LENGTH_LIMIT) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + if (tagKey.length() > TAG_KEY_LENGTH_LIMIT) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); ex.setErrorMessage("The tag key exceeds the maximum length of " + TAG_KEY_LENGTH_LIMIT); throw ex; } - if (tagPair.getValue().length() > TAG_VALUE_LENGTH_LIMIT) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getValue()); + if (tagValue.length() > TAG_VALUE_LENGTH_LIMIT) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagValue); ex.setErrorMessage("The tag value exceeds the maximum length of " + TAG_VALUE_LENGTH_LIMIT); throw ex; } - if (!TAG_REGEX_PATTERN.matcher(tagPair.getName()).matches()) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + if (!TAG_REGEX_PATTERN.matcher(tagKey).matches()) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); ex.setErrorMessage("The tag key does not have a valid pattern"); throw ex; } - if (!TAG_REGEX_PATTERN.matcher(tagPair.getValue()).matches()) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getValue()); + if (!TAG_REGEX_PATTERN.matcher(tagValue).matches()) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagValue); ex.setErrorMessage("The tag value does not have a valid pattern"); throw ex; } - tags.put(tagPair.getName(), tagPair.getValue()); + final String previous = tags.put(tagKey, tagValue); + if (previous != null) { + // Tags that are associated with an object must have unique tag keys + // Reject request if the same key is used twice on the same resource + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("There are tags with duplicate tag keys, tag keys should be unique"); + throw ex; 
+ } } if (tags.size() > TAG_NUM_LIMIT) { @@ -426,7 +438,7 @@ protected Map getTaggingFromHeaders(HttpHeaders httpHeaders) throw ex; } - return tags; + return Collections.unmodifiableMap(tags); } private AuditMessage.Builder auditMessageBaseBuilder(AuditAction op, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MessageUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MessageUnmarshaller.java new file mode 100644 index 00000000000..dd50598c7c5 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MessageUnmarshaller.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.s3.endpoint; + +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.MessageBodyReader; +import javax.xml.XMLConstants; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.UnmarshallerHandler; +import javax.xml.parsers.SAXParserFactory; +import java.io.InputStream; +import java.lang.annotation.Annotation; +import java.lang.reflect.Type; + +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; +import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; + +/** + * Unmarshaller to create instances of type {@code T} from XML, + * which may or may not have namespace. 
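+ * Subclasses such as {@link MultiDeleteRequestUnmarshaller} only pass their target class to the
+ * constructor; endpoints can then use the convenience overload directly, e.g.
+ * {@code S3Tagging tagging = new PutTaggingUnmarshaller().readFrom(body)}.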
+ * @param the object type to read from XML + */ +public class MessageUnmarshaller implements MessageBodyReader { + + private final JAXBContext context; + private final SAXParserFactory saxParserFactory; + private final Class cls; + + public MessageUnmarshaller(Class cls) { + this.cls = cls; + + try { + context = JAXBContext.newInstance(cls); + saxParserFactory = SAXParserFactory.newInstance(); + saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); + } catch (Exception ex) { + throw new AssertionError("Can not instantiate XML parser for " + cls.getSimpleName(), ex); + } + } + + @Override + public boolean isReadable(Class aClass, Type type, + Annotation[] annotations, MediaType mediaType) { + return type.equals(cls); + } + + @Override + public T readFrom( + Class aClass, Type type, + Annotation[] annotations, MediaType mediaType, + MultivaluedMap multivaluedMap, + InputStream inputStream + ) throws WebApplicationException { + try { + XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); + UnmarshallerHandler unmarshallerHandler = + context.createUnmarshaller().getUnmarshallerHandler(); + XmlNamespaceFilter filter = + new XmlNamespaceFilter(S3_XML_NAMESPACE); + filter.setContentHandler(unmarshallerHandler); + filter.setParent(xmlReader); + filter.parse(new InputSource(inputStream)); + return cls.cast(unmarshallerHandler.getResult()); + } catch (Exception e) { + throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); + } + } + + /** Convenience method for programmatic invocation. */ + public T readFrom(InputStream inputStream) throws WebApplicationException { + return readFrom(cls, cls, new Annotation[0], MediaType.APPLICATION_XML_TYPE, null, inputStream); + } + +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java index 0c34c08091a..3102fb94f08 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java @@ -18,21 +18,8 @@ package org.apache.hadoop.ozone.s3.endpoint; import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyReader; import javax.ws.rs.ext.Provider; -import javax.xml.XMLConstants; -import javax.xml.bind.JAXBContext; -import javax.xml.bind.UnmarshallerHandler; -import javax.xml.parsers.SAXParserFactory; -import java.io.InputStream; -import java.lang.annotation.Annotation; -import java.lang.reflect.Type; - -import org.xml.sax.InputSource; -import org.xml.sax.XMLReader; /** * Custom unmarshaller to read MultiDeleteRequest w/wo namespace. 
@@ -40,45 +27,10 @@ @Provider @Produces(MediaType.APPLICATION_XML) public class MultiDeleteRequestUnmarshaller - implements MessageBodyReader { - - private final JAXBContext context; - private final SAXParserFactory saxParserFactory; + extends MessageUnmarshaller { public MultiDeleteRequestUnmarshaller() { - try { - context = JAXBContext.newInstance(MultiDeleteRequest.class); - saxParserFactory = SAXParserFactory.newInstance(); - saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - } catch (Exception ex) { - throw new AssertionError("Can't instantiate MultiDeleteRequest parser", - ex); - } - } - - @Override - public boolean isReadable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { - return type.equals(MultiDeleteRequest.class); + super(MultiDeleteRequest.class); } - @Override - public MultiDeleteRequest readFrom(Class type, - Type genericType, Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, InputStream entityStream) { - try { - XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); - UnmarshallerHandler unmarshallerHandler = - context.createUnmarshaller().getUnmarshallerHandler(); - - XmlNamespaceFilter filter = - new XmlNamespaceFilter("http://s3.amazonaws.com/doc/2006-03-01/"); - filter.setContentHandler(unmarshallerHandler); - filter.setParent(xmlReader); - filter.parse(new InputSource(entityStream)); - return (MultiDeleteRequest) unmarshallerHandler.getResult(); - } catch (Exception e) { - throw new WebApplicationException("Can't parse request body to XML.", e); - } - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 40b1e013a46..9311fb7fa4b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -68,6 +68,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.ozone.s3.HeaderPreprocessor; import org.apache.hadoop.ozone.s3.SignedChunksInputStream; +import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.util.RFC1123Util; @@ -120,6 +121,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.PRECOND_FAILED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; @@ -212,7 +214,7 @@ public void init() { * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for * more details. 
*/ - @SuppressWarnings("checkstyle:MethodLength") + @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @PUT public Response put( @PathParam("bucket") String bucketName, @@ -220,6 +222,8 @@ public Response put( @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, + @QueryParam("tagging") String taggingMarker, + @QueryParam("acl") String aclMarker, final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; @@ -229,7 +233,16 @@ public Response put( String copyHeader = null, storageType = null; DigestInputStream digestInputStream = null; try { + if (aclMarker != null) { + s3GAction = S3GAction.PUT_OBJECT_ACL; + throw newError(NOT_IMPLEMENTED, keyPath); + } OzoneVolume volume = getVolume(); + if (taggingMarker != null) { + s3GAction = S3GAction.PUT_OBJECT_TAGGING; + return putObjectTagging(volume, bucketName, keyPath, body); + } + if (uploadID != null && !uploadID.equals("")) { if (headers.getHeaderString(COPY_SOURCE_HEADER) == null) { s3GAction = S3GAction.CREATE_MULTIPART_KEY; @@ -310,7 +323,7 @@ public Response put( perf.appendStreamMode(); Pair keyWriteResult = ObjectEndpointStreaming .put(bucket, keyPath, length, replicationConfig, chunkSize, - customMetadata, digestInputStream, perf); + customMetadata, tags, digestInputStream, perf); eTag = keyWriteResult.getKey(); putLength = keyWriteResult.getValue(); } else { @@ -320,7 +333,7 @@ public Response put( long metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); - putLength = IOUtils.copyLarge(digestInputStream, output); + putLength = IOUtils.copy(digestInputStream, output, getIOBufferSize(length)); eTag = DatatypeConverter.printHexBinary( digestInputStream.getMessageDigest().digest()) .toLowerCase(); @@ -336,7 +349,9 @@ public Response put( } catch (OMException ex) { auditSuccess = false; auditWriteFailure(s3GAction, ex); - if (copyHeader != null) { + if (taggingMarker != null) { + getMetrics().updatePutObjectTaggingFailureStats(startNanos); + } else if (copyHeader != null) { getMetrics().updateCopyObjectFailureStats(startNanos); } else { getMetrics().updateCreateKeyFailureStats(startNanos); @@ -360,7 +375,11 @@ public Response put( } catch (Exception ex) { auditSuccess = false; auditWriteFailure(s3GAction, ex); - if (copyHeader != null) { + if (aclMarker != null) { + getMetrics().updatePutObjectAclFailureStats(startNanos); + } else if (taggingMarker != null) { + getMetrics().updatePutObjectTaggingFailureStats(startNanos); + } else if (copyHeader != null) { getMetrics().updateCopyObjectFailureStats(startNanos); } else { getMetrics().updateCreateKeyFailureStats(startNanos); @@ -390,7 +409,7 @@ public Response put( * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html * for more details. 
*/ - @SuppressWarnings("checkstyle:MethodLength") + @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @GET public Response get( @PathParam("bucket") String bucketName, @@ -398,12 +417,18 @@ public Response get( @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") String uploadId, @QueryParam("max-parts") @DefaultValue("1000") int maxParts, - @QueryParam("part-number-marker") String partNumberMarker) + @QueryParam("part-number-marker") String partNumberMarker, + @QueryParam("tagging") String taggingMarker) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_KEY; PerformanceStringBuilder perf = new PerformanceStringBuilder(); try { + if (taggingMarker != null) { + s3GAction = S3GAction.GET_OBJECT_TAGGING; + return getObjectTagging(bucketName, keyPath); + } + if (uploadId != null) { // When we have uploadId, this is the request for list Parts. s3GAction = S3GAction.LIST_PARTS; @@ -443,7 +468,7 @@ public Response get( if (rangeHeaderVal == null || rangeHeader.isReadFull()) { StreamingOutput output = dest -> { try (OzoneInputStream key = keyDetails.getContent()) { - long readLength = IOUtils.copyLarge(key, dest); + long readLength = IOUtils.copy(key, dest, getIOBufferSize(keyDetails.getDataSize())); getMetrics().incGetKeySuccessLength(readLength); perf.appendSizeBytes(readLength); } @@ -467,7 +492,7 @@ public Response get( try (OzoneInputStream ozoneInputStream = keyDetails.getContent()) { ozoneInputStream.seek(startOffset); long readLength = IOUtils.copyLarge(ozoneInputStream, dest, 0, - copyLength, new byte[bufferSize]); + copyLength, new byte[getIOBufferSize(copyLength)]); getMetrics().incGetKeySuccessLength(readLength); perf.appendSizeBytes(readLength); } @@ -532,7 +557,9 @@ public Response get( AUDIT.logReadFailure( buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex) ); - if (uploadId != null) { + if (taggingMarker != null) { + getMetrics().updateGetObjectTaggingFailureStats(startNanos); + } else if (uploadId != null) { getMetrics().updateListPartsFailureStats(startNanos); } else { getMetrics().updateGetKeyFailureStats(startNanos); @@ -699,13 +726,19 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, public Response delete( @PathParam("bucket") String bucketName, @PathParam("path") String keyPath, - @QueryParam("uploadId") @DefaultValue("") String uploadId) throws + @QueryParam("uploadId") @DefaultValue("") String uploadId, + @QueryParam("tagging") String taggingMarker) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_KEY; try { OzoneVolume volume = getVolume(); + if (taggingMarker != null) { + s3GAction = S3GAction.DELETE_OBJECT_TAGGING; + return deleteObjectTagging(volume, bucketName, keyPath); + } + if (uploadId != null && !uploadId.equals("")) { s3GAction = S3GAction.ABORT_MULTIPART_UPLOAD; return abortMultipartUpload(volume, bucketName, keyPath, uploadId); @@ -732,13 +765,18 @@ public Response delete( // keys. 
Just return 204 } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex); + } else if (ex.getResult() == ResultCodes.NOT_SUPPORTED_OPERATION) { + // When deleteObjectTagging operation is applied on FSO directory + throw S3ErrorTable.newError(S3ErrorTable.NOT_IMPLEMENTED, keyPath); } else { throw ex; } } catch (Exception ex) { AUDIT.logWriteFailure( buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); - if (uploadId != null && !uploadId.equals("")) { + if (taggingMarker != null) { + getMetrics().updateDeleteObjectTaggingFailureStats(startNanos); + } else if (uploadId != null && !uploadId.equals("")) { getMetrics().updateAbortMultipartUploadFailureStats(startNanos); } else { getMetrics().updateDeleteKeyFailureStats(startNanos); @@ -997,7 +1035,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge( - sourceObject, ozoneOutputStream, 0, length); + sourceObject, ozoneOutputStream, 0, length, new byte[getIOBufferSize(length)]); ozoneOutputStream.getMetadata() .putAll(sourceKeyDetails.getMetadata()); outputStream = ozoneOutputStream; @@ -1008,7 +1046,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, partNumber, uploadID)) { metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); - copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream); + copyLength = IOUtils.copy(sourceObject, ozoneOutputStream, getIOBufferSize(length)); ozoneOutputStream.getMetadata() .putAll(sourceKeyDetails.getMetadata()); outputStream = ozoneOutputStream; @@ -1024,7 +1062,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, partNumber, uploadID)) { metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); - putLength = IOUtils.copyLarge(digestInputStream, ozoneOutputStream); + putLength = IOUtils.copy(digestInputStream, ozoneOutputStream, getIOBufferSize(length)); byte[] digest = digestInputStream.getMessageDigest().digest(); ozoneOutputStream.getMetadata() .put(ETAG, DatatypeConverter.printHexBinary(digest).toLowerCase()); @@ -1178,7 +1216,7 @@ void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, long metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); - copyLength = IOUtils.copyLarge(src, dest); + copyLength = IOUtils.copy(src, dest, getIOBufferSize(srcKeyLen)); String eTag = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase(); dest.getMetadata().put(ETAG, eTag); } @@ -1381,6 +1419,75 @@ public static boolean checkCopySourceModificationTime( (lastModificationTime <= copySourceIfUnmodifiedSince); } + private Response putObjectTagging(OzoneVolume volume, String bucketName, String keyName, InputStream body) + throws IOException, OS3Exception { + long startNanos = Time.monotonicNowNanos(); + S3Tagging tagging = null; + try { + tagging = new PutTaggingUnmarshaller().readFrom(body); + tagging.validate(); + } catch (Exception ex) { + OS3Exception exception = S3ErrorTable.newError(S3ErrorTable.MALFORMED_XML, keyName); + exception.setErrorMessage(exception.getErrorMessage() + ". 
" + ex.getMessage()); + throw exception; + } + + Map tags = validateAndGetTagging( + tagging.getTagSet().getTags(), // Nullity check was done in previous parsing step + Tag::getKey, + Tag::getValue + ); + + try { + volume.getBucket(bucketName).putObjectTagging(keyName, tags); + } catch (OMException ex) { + if (ex.getResult() == ResultCodes.INVALID_REQUEST) { + throw S3ErrorTable.newError(S3ErrorTable.INVALID_REQUEST, keyName); + } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) { + throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyName); + } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { + throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyName); + } else if (ex.getResult() == ResultCodes.NOT_SUPPORTED_OPERATION) { + // When putObjectTagging operation is applied on FSO directory + throw S3ErrorTable.newError(S3ErrorTable.NOT_IMPLEMENTED, keyName); + } + throw ex; + } + getMetrics().updatePutObjectTaggingSuccessStats(startNanos); + return Response.ok().build(); + } + + private Response getObjectTagging(String bucketName, String keyName) throws IOException { + long startNanos = Time.monotonicNowNanos(); + + OzoneVolume volume = getVolume(); + + Map tagMap = volume.getBucket(bucketName).getObjectTagging(keyName); + + getMetrics().updateGetObjectTaggingSuccessStats(startNanos); + return Response.ok(S3Tagging.fromMap(tagMap), MediaType.APPLICATION_XML_TYPE).build(); + } + + private Response deleteObjectTagging(OzoneVolume volume, String bucketName, String keyName) + throws IOException, OS3Exception { + long startNanos = Time.monotonicNowNanos(); + + try { + volume.getBucket(bucketName).deleteObjectTagging(keyName); + } catch (OMException ex) { + // Unlike normal key deletion that ignores the key not found exception + // DeleteObjectTagging should throw the exception if the key does not exist + if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { + throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyName); + } + throw ex; + } + + getMetrics().updateDeleteObjectTaggingSuccessStats(startNanos); + return Response.noContent().build(); + } + + @VisibleForTesting public void setOzoneConfiguration(OzoneConfiguration config) { this.ozoneConfiguration = config; @@ -1408,4 +1515,18 @@ private String extractPartsCount(String eTag) { } return null; } + + private int getIOBufferSize(long fileLength) { + if (bufferSize == 0) { + // this is mainly for unit tests as init() will not be called in the unit tests + LOG.warn("buffer size is set to {}", IOUtils.DEFAULT_BUFFER_SIZE); + bufferSize = IOUtils.DEFAULT_BUFFER_SIZE; + } + if (fileLength == 0) { + // for empty file + return bufferSize; + } else { + return fileLength < bufferSize ? 
(int) fileLength : bufferSize; + } + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index cb9499aa20d..f5d185fc76b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -61,12 +61,13 @@ public static Pair put( OzoneBucket bucket, String keyPath, long length, ReplicationConfig replicationConfig, int chunkSize, Map keyMetadata, + Map tags, DigestInputStream body, PerformanceStringBuilder perf) throws IOException, OS3Exception { try { return putKeyWithStream(bucket, keyPath, - length, chunkSize, replicationConfig, keyMetadata, body, perf); + length, chunkSize, replicationConfig, keyMetadata, tags, body, perf); } catch (IOException ex) { LOG.error("Exception occurred in PutObject", ex); if (ex instanceof OMException) { @@ -97,13 +98,14 @@ public static Pair putKeyWithStream( int bufferSize, ReplicationConfig replicationConfig, Map keyMetadata, + Map tags, DigestInputStream body, PerformanceStringBuilder perf) throws IOException { long startNanos = Time.monotonicNowNanos(); long writeLen; String eTag; try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath, - length, replicationConfig, keyMetadata)) { + length, replicationConfig, keyMetadata, tags)) { long metadataLatencyNs = METRICS.updatePutKeyMetadataStats(startNanos); writeLen = writeToStreamOutput(streamOutput, body, bufferSize, length); eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java index 3fa6149815e..df15a87428e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java @@ -17,69 +17,16 @@ */ package org.apache.hadoop.ozone.s3.endpoint; -import org.xml.sax.InputSource; -import org.xml.sax.XMLReader; - -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyReader; import javax.ws.rs.ext.Provider; -import javax.xml.XMLConstants; -import javax.xml.bind.JAXBContext; -import javax.xml.bind.UnmarshallerHandler; -import javax.xml.parsers.SAXParserFactory; -import java.io.IOException; -import java.io.InputStream; -import java.lang.annotation.Annotation; -import java.lang.reflect.Type; - -import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; /** * Custom unmarshaller to read PutBucketAclRequest wo namespace. 
*/ @Provider -public class PutBucketAclRequestUnmarshaller - implements MessageBodyReader { - - private final JAXBContext context; - private final SAXParserFactory saxParserFactory; +public class PutBucketAclRequestUnmarshaller extends MessageUnmarshaller { public PutBucketAclRequestUnmarshaller() { - try { - context = JAXBContext.newInstance(S3BucketAcl.class); - saxParserFactory = SAXParserFactory.newInstance(); - saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - } catch (Exception ex) { - throw new AssertionError("Can not instantiate " + - "PutBucketAclRequest parser", ex); - } - } - @Override - public boolean isReadable(Class aClass, Type type, - Annotation[] annotations, MediaType mediaType) { - return type.equals(S3BucketAcl.class); + super(S3BucketAcl.class); } - @Override - public S3BucketAcl readFrom( - Class aClass, Type type, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap multivaluedMap, - InputStream inputStream) throws IOException, WebApplicationException { - try { - XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); - UnmarshallerHandler unmarshallerHandler = - context.createUnmarshaller().getUnmarshallerHandler(); - XmlNamespaceFilter filter = - new XmlNamespaceFilter(S3_XML_NAMESPACE); - filter.setContentHandler(unmarshallerHandler); - filter.setParent(xmlReader); - filter.parse(new InputSource(inputStream)); - return (S3BucketAcl)(unmarshallerHandler.getResult()); - } catch (Exception e) { - throw new WebApplicationException("Can't parse request body to XML.", e); - } - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java similarity index 72% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java rename to hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java index 47c94e03cb2..f0db9fda9e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -15,17 +15,15 @@ * the License. */ -package org.apache.hadoop.ozone.client.rpc; - -import org.junit.jupiter.api.BeforeAll; +package org.apache.hadoop.ozone.s3.endpoint; /** - * Tests key output stream with zero-copy enabled. + * Custom unmarshaller to read Tagging request body. 
*/ -public class TestECKeyOutputStreamWithZeroCopy extends - AbstractTestECKeyOutputStream { - @BeforeAll - public static void init() throws Exception { - init(true); +public class PutTaggingUnmarshaller extends MessageUnmarshaller { + + public PutTaggingUnmarshaller() { + super(S3Tagging.class); } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java new file mode 100644 index 00000000000..0a0f289f1d8 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * S3 tagging. + */ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "Tagging", + namespace = "http://s3.amazonaws.com/doc/2006-03-01/") +public class S3Tagging { + + @XmlElement(name = "TagSet") + private TagSet tagSet; + + public S3Tagging() { + + } + + public S3Tagging(TagSet tagSet) { + this.tagSet = tagSet; + } + + public TagSet getTagSet() { + return tagSet; + } + + public void setTagSet(TagSet tagSet) { + this.tagSet = tagSet; + } + + /** + * Entity for child element TagSet. + */ + @XmlAccessorType(XmlAccessType.FIELD) + @XmlRootElement(name = "TagSet") + public static class TagSet { + @XmlElement(name = "Tag") + private List tags = new ArrayList<>(); + + public TagSet() { + } + + public TagSet(List tags) { + this.tags = tags; + } + + public List getTags() { + return tags; + } + + public void setTags(List tags) { + this.tags = tags; + } + } + + /** + * Entity for child element Tag. + */ + @XmlAccessorType(XmlAccessType.FIELD) + @XmlRootElement(name = "Tag") + public static class Tag { + @XmlElement(name = "Key") + private String key; + + @XmlElement(name = "Value") + private String value; + + public Tag() { + } + + public Tag(String key, String value) { + this.key = key; + this.value = value; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + } + + /** + * Creates a S3 tagging instance (xml representation) from a Map retrieved + * from OM. + * @param tagMap Map representing the tags. + * @return {@link S3Tagging} + */ + public static S3Tagging fromMap(Map tagMap) { + List tags = tagMap.entrySet() + .stream() + .map( + tagEntry -> new Tag(tagEntry.getKey(), tagEntry.getValue()) + ) + .collect(Collectors.toList()); + return new S3Tagging(new TagSet(tags)); + } + + /** + * Additional XML validation logic for S3 tagging. 
+ */ + public void validate() { + if (tagSet == null) { + throw new IllegalArgumentException("TagSet needs to be specified"); + } + + if (tagSet.getTags().isEmpty()) { + throw new IllegalArgumentException("Tags need to be specified and cannot be empty"); + } + + for (Tag tag: tagSet.getTags()) { + if (tag.getKey() == null) { + throw new IllegalArgumentException("Some tag keys are not specified"); + } + if (tag.getValue() == null) { + throw new IllegalArgumentException("Tag value for tag " + tag.getKey() + " is not specified"); + } + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java index 810aa2085f4..3660457146f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java @@ -158,4 +158,9 @@ public String toXml() { this.getErrorMessage(), this.getResource(), this.getRequestId()); } + + /** Create a copy with specific message. */ + public OS3Exception withMessage(String message) { + return new OS3Exception(code, message, httpCode); + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index 42c044086b8..49761f89a3a 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -145,6 +145,10 @@ private S3ErrorTable() { public static final OS3Exception NO_SUCH_TAG_SET = new OS3Exception( "NoSuchTagSet", "The specified tag does not exist.", HTTP_NOT_FOUND); + public static final OS3Exception MALFORMED_XML = new OS3Exception( + "MalformedXML", "The XML you provided was not well-formed or did not " + + "validate against our published schema", HTTP_BAD_REQUEST); + public static OS3Exception newError(OS3Exception e, String resource) { return newError(e, resource, null); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java index dd84d019176..49edc4d543e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; @@ -33,12 +34,15 @@ import org.apache.hadoop.ozone.util.PerformanceMetrics; import org.apache.hadoop.util.Time; +import java.io.Closeable; +import java.util.Map; + /** * This class maintains S3 Gateway related metrics. 
*/ @InterfaceAudience.Private @Metrics(about = "S3 Gateway Metrics", context = OzoneConsts.OZONE) -public final class S3GatewayMetrics implements MetricsSource { +public final class S3GatewayMetrics implements Closeable, MetricsSource { public static final String SOURCE_NAME = S3GatewayMetrics.class.getSimpleName(); @@ -91,6 +95,14 @@ public final class S3GatewayMetrics implements MetricsSource { private @Metric MutableCounterLong copyObjectSuccessLength; private @Metric MutableCounterLong putKeySuccessLength; private @Metric MutableCounterLong getKeySuccessLength; + private @Metric MutableCounterLong getObjectTaggingSuccess; + private @Metric MutableCounterLong getObjectTaggingFailure; + private @Metric MutableCounterLong putObjectTaggingSuccess; + private @Metric MutableCounterLong putObjectTaggingFailure; + private @Metric MutableCounterLong deleteObjectTaggingSuccess; + private @Metric MutableCounterLong deleteObjectTaggingFailure; + private @Metric MutableCounterLong putObjectAclSuccess; + private @Metric MutableCounterLong putObjectAclFailure; // S3 Gateway Latency Metrics // BucketEndpoint @@ -242,6 +254,34 @@ public final class S3GatewayMetrics implements MetricsSource { @Metric(about = "Latency for copy metadata of an key in nanoseconds") private PerformanceMetrics copyKeyMetadataLatencyNs; + @Metric(about = "Latency for successful get object tagging of a key in nanoseconds") + private PerformanceMetrics getObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to get object tagging of a key in nanoseconds") + private PerformanceMetrics getObjectTaggingFailureLatencyNs; + + @Metric(about = "Latency for successful put object tagging of a key in nanoseconds") + private PerformanceMetrics putObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to put object tagging of a key in nanoseconds") + private PerformanceMetrics putObjectTaggingFailureLatencyNs; + + @Metric(about = "Latency for successful delete object tagging of a key in nanoseconds") + private PerformanceMetrics deleteObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to delete object tagging of a key in nanoseconds") + private PerformanceMetrics deleteObjectTaggingFailureLatencyNs; + + @Metric(about = "Latency for successfully setting an S3 object ACL " + + "in nanoseconds") + private PerformanceMetrics putObjectAclSuccessLatencyNs; + + @Metric(about = "Latency for failing to set an S3 object ACL " + + "in nanoseconds") + private PerformanceMetrics putObjectAclFailureLatencyNs; + + private final Map performanceMetrics; + /** * Private constructor. */ @@ -249,10 +289,15 @@ private S3GatewayMetrics(OzoneConfiguration conf) { this.registry = new MetricsRegistry(SOURCE_NAME); int[] intervals = conf.getInts(S3GatewayConfigKeys .OZONE_S3G_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); - PerformanceMetrics.initializeMetrics( + performanceMetrics = PerformanceMetrics.initializeMetrics( this, registry, "Ops", "Time", intervals); } + @Override + public void close() { + IOUtils.closeQuietly(performanceMetrics.values()); + } + /** * Create and returns S3 Gateway Metrics instance. * @@ -272,6 +317,7 @@ public static synchronized S3GatewayMetrics create(OzoneConfiguration conf) { * Unregister the metrics instance. 
*/ public static void unRegister() { + IOUtils.closeQuietly(instance); instance = null; MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); @@ -363,6 +409,20 @@ public void getMetrics(MetricsCollector collector, boolean all) { putKeySuccessLength.snapshot(recordBuilder, true); getKeySuccessLength.snapshot(recordBuilder, true); listKeyCount.snapshot(recordBuilder, true); + getObjectTaggingSuccess.snapshot(recordBuilder, true); + getObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + getObjectTaggingFailure.snapshot(recordBuilder, true); + getObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); + putObjectTaggingSuccess.snapshot(recordBuilder, true); + putObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + putObjectTaggingFailure.snapshot(recordBuilder, true); + putObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); + deleteObjectTaggingSuccess.snapshot(recordBuilder, true); + deleteObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + deleteObjectTaggingFailure.snapshot(recordBuilder, true); + deleteObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); + putObjectAclSuccess.snapshot(recordBuilder, true); + putObjectAclFailure.snapshot(recordBuilder, true); } // INC and UPDATE @@ -584,6 +644,46 @@ public void incGetKeySuccessLength(long bytes) { getKeySuccessLength.incr(bytes); } + public void updateGetObjectTaggingSuccessStats(long startNanos) { + this.getObjectTaggingSuccess.incr(); + this.getObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateGetObjectTaggingFailureStats(long startNanos) { + this.getObjectTaggingFailure.incr(); + this.getObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectTaggingSuccessStats(long startNanos) { + this.putObjectTaggingSuccess.incr(); + this.putObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectTaggingFailureStats(long startNanos) { + this.putObjectTaggingFailure.incr(); + this.putObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateDeleteObjectTaggingSuccessStats(long startNanos) { + this.deleteObjectTaggingSuccess.incr(); + this.deleteObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateDeleteObjectTaggingFailureStats(long startNanos) { + this.deleteObjectTaggingFailure.incr(); + this.deleteObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectAclSuccessStats(long startNanos) { + this.putObjectAclSuccess.incr(); + this.putObjectAclSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectAclFailureStats(long startNanos) { + this.putObjectAclFailure.incr(); + this.putObjectAclFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + // GET public long getListS3BucketsSuccess() { return listS3BucketsSuccess.value(); @@ -725,6 +825,30 @@ public long getListS3BucketsFailure() { return listS3BucketsFailure.value(); } + public long getGetObjectTaggingSuccess() { + return getObjectTaggingSuccess.value(); + } + + public long getGetObjectTaggingFailure() { + return getObjectTaggingFailure.value(); + } + + public long getPutObjectTaggingSuccess() { + return putObjectTaggingSuccess.value(); + } + + public long getPutObjectTaggingFailure() { + return putObjectTaggingFailure.value(); + } + + public long 
getDeleteObjectTaggingSuccess() { + return deleteObjectTaggingSuccess.value(); + } + + public long getDeleteObjectTaggingFailure() { + return deleteObjectTaggingFailure.value(); + } + private long updateAndGetStats(PerformanceMetrics metric, long startNanos) { long value = Time.monotonicNowNanos() - startNanos; metric.add(value); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java index 43a1e6b7130..d517154de80 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java @@ -45,7 +45,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; /** - * Parser to process AWS V2 & V4 auth request. Creates string to sign and auth + * Parser to process AWS V2 and V4 auth request. Creates string to sign and auth * header. For more details refer to AWS documentation https://docs.aws * .amazon.com/general/latest/gr/sigv4-create-canonical-request.html. **/ diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java index be9ecce7c0f..2746de8e5c4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.s3.signature; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,7 +52,7 @@ public class Credential { * Sample credential value: * Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request * - * @throws OS3Exception + * @throws MalformedResourceException */ @SuppressWarnings("StringSplitter") public void parseCredential() throws MalformedResourceException { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index cbdbef0e0a1..7b82d5c2a70 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -74,6 +74,7 @@ private S3Consts() { public static final String TAG_HEADER = "x-amz-tagging"; public static final String TAG_DIRECTIVE_HEADER = "x-amz-tagging-directive"; public static final String TAG_COUNT_HEADER = "x-amz-tagging-count"; + public static final String AWS_TAG_PREFIX = "aws:"; public static final int TAG_NUM_LIMIT = 10; public static final int TAG_KEY_LENGTH_LIMIT = 128; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java index d644162a8ec..fda298f27dc 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Utils.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response; import 
java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; @@ -116,4 +118,11 @@ public static S3StorageType toS3StorageType(String storageType) throw newError(INVALID_ARGUMENT, storageType, ex); } } + + public static WebApplicationException wrapOS3Exception(OS3Exception ex) { + return new WebApplicationException(ex.getErrorMessage(), ex, + Response.status(ex.getHttpCode()) + .entity(ex.toXml()) + .build()); + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java new file mode 100644 index 00000000000..b5c7b242cb5 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.ozone.s3secret; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import javax.ws.rs.NameBinding; + +/** + * Annotation to only allow admin users to access the endpoint. + */ +@NameBinding +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.TYPE, ElementType.METHOD}) +public @interface S3AdminEndpoint { +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java new file mode 100644 index 00000000000..5ecdfa7c121 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.ozone.s3secret; + + +import javax.inject.Inject; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import javax.ws.rs.ext.Provider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.OzoneAdmins; +import org.apache.hadoop.security.UserGroupInformation; + +import java.io.IOException; +import java.security.Principal; + +/** + * Filter that only allows admin to access endpoints annotated with {@link S3AdminEndpoint}. + * Condition is based on the value of the configuration keys for: + *

<ul> + *   <li>ozone.administrators</li> + *   <li>ozone.administrators.groups</li> + * </ul>
    + */ +@S3AdminEndpoint +@Provider +public class S3SecretAdminFilter implements ContainerRequestFilter { + + @Inject + private OzoneConfiguration conf; + + @Override + public void filter(ContainerRequestContext requestContext) throws IOException { + final Principal userPrincipal = requestContext.getSecurityContext().getUserPrincipal(); + if (null != userPrincipal) { + UserGroupInformation user = UserGroupInformation.createRemoteUser(userPrincipal.getName()); + if (!OzoneAdmins.isS3Admin(user, conf)) { + requestContext.abortWith(Response.status(Status.FORBIDDEN).build()); + } + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index 4ea17d2a2fd..739dadfb28e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -33,7 +33,6 @@ import java.io.IOException; import static javax.ws.rs.core.Response.Status.BAD_REQUEST; -import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED; import static javax.ws.rs.core.Response.Status.NOT_FOUND; /** @@ -41,6 +40,7 @@ */ @Path("/secret") @S3SecretEnabled +@S3AdminEndpoint public class S3SecretManagementEndpoint extends S3SecretEndpointBase { private static final Logger LOG = LoggerFactory.getLogger(S3SecretManagementEndpoint.class); @@ -54,8 +54,7 @@ public Response generate() throws IOException { @Path("/{username}") public Response generate(@PathParam("username") String username) throws IOException { - // TODO: It is a temporary solution. To be removed after HDDS-11041 is done. - return Response.status(METHOD_NOT_ALLOWED).build(); + return generateInternal(username); } private Response generateInternal(@Nullable String username) throws IOException { @@ -95,8 +94,7 @@ public Response revoke() throws IOException { @Path("/{username}") public Response revoke(@PathParam("username") String username) throws IOException { - // TODO: It is a temporary solution. To be removed after HDDS-11041 is done. 
- return Response.status(METHOD_NOT_ALLOWED).build(); + return revokeInternal(username); } private Response revokeInternal(@Nullable String username) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 41876c6e245..41584c9786d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -43,7 +43,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.TenantStateList; @@ -301,21 +300,6 @@ public List listKeys(String volumeName, String bucketName, return null; } - @Override - public List listTrash(String volumeName, String bucketName, - String startKeyName, - String keyPrefix, int maxKeys) - throws IOException { - return null; - } - - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) - throws IOException { - return false; - } - @Override public OzoneKeyDetails getKeyDetails(String volumeName, String bucketName, String keyName) throws IOException { @@ -787,4 +771,20 @@ public void recoverKey(OmKeyArgs args, long clientID) throws IOException { } + @Override + public Map getObjectTagging(String volumeName, String bucketName, String keyName) throws IOException { + return getBucket(volumeName, bucketName).getObjectTagging(keyName); + } + + @Override + public void putObjectTagging(String volumeName, String bucketName, String keyName, Map tags) + throws IOException { + getBucket(volumeName, bucketName).putObjectTagging(keyName, tags); + } + + @Override + public void deleteObjectTagging(String volumeName, String bucketName, String keyName) throws IOException { + getBucket(volumeName, bucketName).deleteObjectTagging(keyName); + } + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 06b6a8efb71..21f2414c0a7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -596,6 +596,37 @@ public boolean setAcl(List acls) throws IOException { return aclList.addAll(acls); } + @Override + public Map getObjectTagging(String keyName) throws IOException { + if (keyDetails.containsKey(keyName)) { + OzoneKeyDetails ozoneKeyDetails = keyDetails.get(keyName); + return ozoneKeyDetails.getTags(); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + + @Override + public void putObjectTagging(String keyName, Map tags) throws IOException { + if (keyDetails.containsKey(keyName)) { + OzoneKeyDetails ozoneKeyDetails = keyDetails.get(keyName); + ozoneKeyDetails.getTags().clear(); + ozoneKeyDetails.getTags().putAll(tags); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + + @Override + public void deleteObjectTagging(String keyName) throws IOException { + if (keyDetails.containsKey(keyName)) { + 
OzoneKeyDetails ozoneKeyDetails = keyDetails.get(keyName); + ozoneKeyDetails.getTags().clear(); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + /** * Class used to hold part information in a upload part request. */ diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 9c107bdb5b1..1356b50ad35 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -69,13 +69,13 @@ public void testAbortMultipartUpload() throws Exception { // Abort multipart upload - response = rest.delete(bucket, key, uploadID); + response = rest.delete(bucket, key, uploadID, null); assertEquals(204, response.getStatus()); // test with unknown upload Id. try { - rest.delete(bucket, key, "random"); + rest.delete(bucket, key, "random", null); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode()); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 677367e6d81..489aa5d91c3 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -77,17 +77,17 @@ public static void setUp() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 2, uploadID, body); + content.length(), 2, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 3, uploadID, body); + content.length(), 3, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -95,7 +95,7 @@ public static void setUp() throws Exception { @Test public void testListParts() throws Exception { Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 3, "0"); + uploadID, 3, "0", null); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -108,7 +108,7 @@ public void testListParts() throws Exception { @Test public void testListPartsContinuation() throws Exception { Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0"); + uploadID, 2, "0", null); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -117,7 +117,7 @@ public void testListPartsContinuation() throws Exception { // Continue response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2, - Integer.toString(listPartsResponse.getNextPartNumberMarker())); + Integer.toString(listPartsResponse.getNextPartNumberMarker()), null); listPartsResponse = (ListPartsResponse) response.getEntity(); assertFalse(listPartsResponse.getTruncated()); @@ -129,7 +129,7 @@ public void 
testListPartsContinuation() throws Exception { public void testListPartsWithUnknownUploadID() throws Exception { try { REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0"); + uploadID, 2, "0", null); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index b23dbfb9c05..4c5e2b53d90 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -109,7 +109,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index d9595aeff79..6894fc4abea 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -330,7 +330,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -375,7 +375,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, - uploadID, body); + uploadID, null, null, body); assertEquals(200, response.getStatus()); CopyPartResult result = (CopyPartResult) response.getEntity(); @@ -402,7 +402,7 @@ public void testUploadWithRangeCopyContentLength() OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY); additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3"); setHeaders(additionalHeaders); - REST.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, body); + REST.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body); OzoneMultipartUploadPartListParts parts = CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) .listParts(KEY, uploadID, 0, 100); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java index 90695f03ff9..340ed1984ec 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java @@ -51,7 +51,7 @@ public 
void delete() throws IOException, OS3Exception { rest.setOzoneConfiguration(new OzoneConfiguration()); //WHEN - rest.delete("b1", "key1", null); + rest.delete("b1", "key1", null, null); //THEN assertFalse(bucket.listKeys("").hasNext(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index 8cf8da95cf8..048faabcef0 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -95,11 +95,11 @@ public void init() throws OS3Exception, IOException { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); // Create a key with object tags when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); context = mock(ContainerRequestContext.class); when(context.getUriInfo()).thenReturn(mock(UriInfo.class)); @@ -111,7 +111,7 @@ public void init() throws OS3Exception, IOException { @Test public void get() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); //THEN OzoneInputStream ozoneInputStream = @@ -133,7 +133,7 @@ public void get() throws IOException, OS3Exception { @Test public void getKeyWithTag() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, null); //THEN OzoneInputStream ozoneInputStream = @@ -155,7 +155,7 @@ public void getKeyWithTag() throws IOException, OS3Exception { public void inheritRequestHeader() throws IOException, OS3Exception { setDefaultHeader(); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(CONTENT_TYPE1, response.getHeaderString("Content-Type")); @@ -188,7 +188,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception { when(context.getUriInfo().getQueryParameters()) .thenReturn(queryParameter); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(CONTENT_TYPE2, response.getHeaderString("Content-Type")); @@ -209,13 +209,13 @@ public void getRangeHeader() throws IOException, OS3Exception { Response response; when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals("1", response.getHeaderString("Content-Length")); assertEquals(String.format("bytes 0-0/%s", CONTENT.length()), response.getHeaderString("Content-Range")); when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(String.valueOf(CONTENT.length()), response.getHeaderString("Content-Length")); assertEquals( @@ -228,7 +228,7 @@ public void getRangeHeader() 
throws IOException, OS3Exception { @Test public void getStatusCode() throws IOException, OS3Exception { Response response; - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(response.getStatus(), Response.Status.OK.getStatusCode()); @@ -236,7 +236,7 @@ public void getStatusCode() throws IOException, OS3Exception { // The 206 (Partial Content) status code indicates that the server is // successfully fulfilling a range request for the target resource when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(response.getStatus(), Response.Status.PARTIAL_CONTENT.getStatusCode()); assertNull(response.getHeaderString(TAG_COUNT_HEADER)); @@ -270,7 +270,7 @@ public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash() // WHEN final OS3Exception ex = assertThrows(OS3Exception.class, - () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null)); + () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null, null)); // THEN assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index 8cde144a374..a36d756ddaa 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -31,6 +31,7 @@ import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; + import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; @@ -79,6 +80,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; @@ -155,7 +157,7 @@ void testPutObject(int length, ReplicationConfig replication) throws IOException bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, body); + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, null, null, body); //THEN assertEquals(200, response.getStatus()); @@ -182,7 +184,7 @@ void testPutObjectContentLength() throws IOException, OS3Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); long dataSize = CONTENT.length(); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, body); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, null, null, body); assertEquals(dataSize, getKeyDataSize()); } @@ -199,8 +201,8 @@ void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, - new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, null, + null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertEquals(15, getKeyDataSize()); } @@ -214,7 +216,7 @@ 
public void testPutObjectWithTags() throws IOException, OS3Exception { objectEndpoint.setHeaders(headersWithTags); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); assertEquals(200, response.getStatus()); @@ -237,7 +239,7 @@ public void testPutObjectWithOnlyTagKey() throws Exception { try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with invalid query param should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -255,7 +257,7 @@ public void testPutObjectWithDuplicateTagKey() throws Exception { objectEndpoint.setHeaders(headersWithDuplicateTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with duplicate tag key should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -274,7 +276,7 @@ public void testPutObjectWithLongTagKey() throws Exception { objectEndpoint.setHeaders(headersWithLongTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with tag key exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -293,7 +295,7 @@ public void testPutObjectWithLongTagValue() throws Exception { when(headersWithLongTagValue.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with tag value exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -318,7 +320,7 @@ public void testPutObjectWithTooManyTags() throws Exception { objectEndpoint.setHeaders(headersWithTooManyTags); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with number of tags exceeding limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -347,7 +349,7 @@ void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //WHEN Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - chunkedContent.length(), 1, null, + chunkedContent.length(), 1, null, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN @@ -368,7 +370,7 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception MessageDigest messageDigest = mock(MessageDigest.class); try (MockedStatic mocked = mockStatic(IOUtils.class)) { // For example, EOFException during put-object due to client cancelling the operation before it completes - mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + mocked.when(() -> IOUtils.copy(any(InputStream.class), any(OutputStream.class), anyInt())) .thenThrow(IOException.class); when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); @@ -376,7 +378,7 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can 
be reused for the @@ -401,7 +403,7 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -427,7 +429,7 @@ void testCopyObject() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(KEY_NAME)); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, null, body); // Check destination key and response ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) @@ -457,7 +459,7 @@ void testCopyObject() throws IOException, OS3Exception { metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, null, body); ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) .readKey(DEST_KEY); @@ -484,7 +486,7 @@ void testCopyObject() throws IOException, OS3Exception { // wrong copy metadata directive when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body), + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getHttpCode()).isEqualTo(400); assertThat(e.getCode()).isEqualTo("InvalidArgument"); @@ -494,7 +496,7 @@ void testCopyObject() throws IOException, OS3Exception { // source and dest same e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body), + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); @@ -502,28 +504,28 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = 
assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", KEY_NAME, CONTENT.length(), 1, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); } @@ -535,7 +537,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -553,7 +555,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException try (MockedStatic mocked = mockStatic(IOUtils.class)) { // Add the mocked methods only during the copy request when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); - mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + mocked.when(() -> IOUtils.copy(any(InputStream.class), any(OutputStream.class), anyInt())) .thenThrow(IOException.class); // Add copy header, and then call put @@ -562,7 +564,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException try { objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the @@ -584,7 +586,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { String sourceKeyName = "sourceKey"; Response putResponse = objectEndpoint.put(BUCKET_NAME, sourceKeyName, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, null, body); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(sourceKeyName); @@ -601,7 +603,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(sourceKeyName)); objectEndpoint.setHeaders(headersForCopy); - Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); @@ -620,7 +622,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // With x-amz-tagging-directive = COPY with a different x-amz-tagging when(headersForCopy.getHeaderString(TAG_HEADER)).thenReturn("tag3=value3"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -635,7 +637,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // Copy object with x-amz-tagging-directive = REPLACE when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("REPLACE"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -657,7 +659,7 @@ public 
void testCopyObjectWithInvalidTagCopyDirective() throws Exception { HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("INVALID"); try { - objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, body); + objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, null, null, body); } catch (OS3Exception ex) { assertEquals(INVALID_ARGUMENT.getCode(), ex.getCode()); assertThat(ex.getErrorMessage()).contains("The tagging copy directive specified is invalid"); @@ -672,7 +674,7 @@ void testInvalidStorageType() { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body)); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); @@ -685,7 +687,7 @@ void testEmptyStorageType() throws IOException, OS3Exception { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, null, body); OzoneKeyDetails key = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) .getKey(KEY_NAME); @@ -704,7 +706,7 @@ void testDirectoryCreation() throws IOException, // WHEN try (Response response = objectEndpoint.put(fsoBucket.getName(), path, - 0L, 0, "", null)) { + 0L, 0, "", null, null, null)) { assertEquals(HttpStatus.SC_OK, response.getStatus()); } @@ -719,16 +721,29 @@ void testDirectoryCreationOverFile() throws IOException, OS3Exception { final String path = "key"; final ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", null, null, body); // WHEN final OS3Exception exception = assertThrows(OS3Exception.class, () -> objectEndpoint - .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null) + .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null, null, null) .close()); // THEN assertEquals(S3ErrorTable.NO_OVERWRITE.getCode(), exception.getCode()); assertEquals(S3ErrorTable.NO_OVERWRITE.getHttpCode(), exception.getHttpCode()); } + + @Test + public void testPutEmptyObject() throws IOException, OS3Exception { + HttpHeaders headersWithTags = Mockito.mock(HttpHeaders.class); + String emptyString = ""; + ByteArrayInputStream body = new ByteArrayInputStream(emptyString.getBytes(UTF_8)); + objectEndpoint.setHeaders(headersWithTags); + + Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, null, null, null, body); + assertEquals(200, putResponse.getStatus()); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + assertEquals(0, keyDetails.getDataSize()); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java new file mode 100644 index 00000000000..91f8869dc91 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_NO_CONTENT; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for DeleteObjectTagging. 
+ */ +public class TestObjectTaggingDelete { + + private static final String CONTENT = "0123456789"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_WITH_TAG = "keyWithTag"; + private HttpHeaders headers; + private ObjectEndpoint rest; + private OzoneClient client; + private ByteArrayInputStream body; + private ContainerRequestContext context; + + @BeforeEach + public void init() throws OS3Exception, IOException { + //GIVEN + OzoneConfiguration config = new OzoneConfiguration(); + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + rest = new ObjectEndpoint(); + rest.setClient(client); + rest.setOzoneConfiguration(config); + headers = Mockito.mock(HttpHeaders.class); + rest.setHeaders(headers); + body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create a key with object tags + Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, null, body); + + + context = Mockito.mock(ContainerRequestContext.class); + Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class)); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(new MultivaluedHashMap<>()); + rest.setContext(context); + } + + @Test + public void testDeleteTagging() throws IOException, OS3Exception { + Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG, null, ""); + assertEquals(HTTP_NO_CONTENT, response.getStatus()); + + assertTrue(client.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_WITH_TAG).getTags().isEmpty()); + } + + @Test + public void testDeleteTaggingNoKeyFound() throws Exception { + try { + rest.delete(BUCKET_NAME, "nonexistent", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testDeleteTaggingNoBucketFound() throws Exception { + try { + rest.delete("nonexistent", "nonexistent", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } + + @Test + public void testDeleteObjectTaggingNotImplemented() throws Exception { + OzoneClient mockClient = mock(OzoneClient.class); + ObjectStore mockObjectStore = mock(ObjectStore.class); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); + + when(mockClient.getObjectStore()).thenReturn(mockObjectStore); + when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); + + ObjectEndpoint endpoint = new ObjectEndpoint(); + endpoint.setClient(mockClient); + + doThrow(new OMException("DeleteObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/"); + + try { + endpoint.delete("fsoBucket", "dir/", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); + assertEquals(NOT_IMPLEMENTED.getCode(), ex.getCode()); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java new file mode 100644 
index 00000000000..f379ae71f59 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; + +/** + * Tests for GetObjectTagging. + */ +public class TestObjectTaggingGet { + + private static final String CONTENT = "0123456789"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_WITH_TAG = "keyWithTag"; + private ObjectEndpoint rest; + + @BeforeEach + public void init() throws OS3Exception, IOException { + //GIVEN + OzoneConfiguration config = new OzoneConfiguration(); + OzoneClient client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + rest = new ObjectEndpoint(); + rest.setClient(client); + rest.setOzoneConfiguration(config); + HttpHeaders headers = Mockito.mock(HttpHeaders.class); + rest.setHeaders(headers); + ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create a key with object tags + Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, null, body); + + + ContainerRequestContext context = Mockito.mock(ContainerRequestContext.class); + Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class)); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(new MultivaluedHashMap<>()); + rest.setContext(context); + } + + @Test + public void testGetTagging() throws IOException, OS3Exception { + //WHEN + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, ""); + + assertEquals(HTTP_OK, response.getStatus()); + S3Tagging s3Tagging = (S3Tagging) response.getEntity(); + assertNotNull(s3Tagging); + assertNotNull(s3Tagging.getTagSet()); + assertEquals(2, s3Tagging.getTagSet().getTags().size()); + for (Tag tag: s3Tagging.getTagSet().getTags()) { + if (tag.getKey().equals("tag1")) { + assertEquals("value1", tag.getValue()); + } else if (tag.getKey().equals("tag2")) { + assertEquals("value2", tag.getValue()); + } else { + fail("Unknown 
tag found"); + } + } + } + + @Test + public void testGetTaggingNoKeyFound() throws Exception { + try { + rest.get(BUCKET_NAME, "nonexistent", 0, null, 0, null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testGetTaggingNoBucketFound() throws Exception { + try { + rest.get("nonexistent", "nonexistent", 0, null, 0, null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java new file mode 100644 index 00000000000..478ab8ba79f --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import javax.ws.rs.core.HttpHeaders; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; + +import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_XML; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for PutObjectTagging. + */ +public class TestObjectTaggingPut { + + private OzoneClient clientStub; + private ObjectEndpoint objectEndpoint; + + private static final String BUCKET_NAME = "b1"; + private static final String KEY_NAME = "key=value/1"; + + @BeforeEach + void setup() throws IOException, OS3Exception { + OzoneConfiguration config = new OzoneConfiguration(); + + //Create client stub and object store stub. 
+ clientStub = new OzoneClientStub(); + + // Create bucket + clientStub.getObjectStore().createS3Bucket(BUCKET_NAME); + + // Create PutObject and setClient to OzoneClientStub + objectEndpoint = new ObjectEndpoint(); + objectEndpoint.setClient(clientStub); + objectEndpoint.setOzoneConfiguration(config); + + HttpHeaders headers = mock(HttpHeaders.class); + ByteArrayInputStream body = + new ByteArrayInputStream("".getBytes(UTF_8)); + objectEndpoint.setHeaders(headers); + + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, null, null, body); + } + + @Test + public void testPutObjectTaggingWithEmptyBody() throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + null); + fail(); + } catch (OS3Exception ex) { + assertEquals(HTTP_BAD_REQUEST, ex.getHttpCode()); + assertEquals(MALFORMED_XML.getCode(), ex.getCode()); + } + } + + @Test + public void testPutValidObjectTagging() throws Exception { + assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, + "", null, twoTags()).getStatus()); + OzoneKeyDetails keyDetails = + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + assertEquals(2, keyDetails.getTags().size()); + assertEquals("val1", keyDetails.getTags().get("tag1")); + assertEquals("val2", keyDetails.getTags().get("tag2")); + } + + @Test + public void testPutInvalidObjectTagging() throws Exception { + testInvalidObjectTagging(this::emptyBody, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::invalidXmlStructure, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::noTagSet, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::emptyTags, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::tagKeyNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::tagValueNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + } + + private void testInvalidObjectTagging(Supplier inputStream, + int expectedHttpCode, String expectedErrorCode) throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + inputStream.get()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(expectedHttpCode, ex.getHttpCode()); + assertEquals(expectedErrorCode, ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNoKeyFound() throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, "nonexistent", 0, 1, + null, "", null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNoBucketFound() throws Exception { + try { + objectEndpoint.put("nonexistent", "nonexistent", 0, 1, + null, "", null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNotImplemented() throws Exception { + OzoneClient mockClient = mock(OzoneClient.class); + ObjectStore mockObjectStore = mock(ObjectStore.class); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); + + when(mockClient.getObjectStore()).thenReturn(mockObjectStore); + when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + 
when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); + + ObjectEndpoint endpoint = new ObjectEndpoint(); + Map twoTagsMap = new HashMap<>(); + twoTagsMap.put("tag1", "val1"); + twoTagsMap.put("tag2", "val2"); + endpoint.setClient(mockClient); + + doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap); + + try { + endpoint.put("fsoBucket", "dir/", 0, 1, null, "", + null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); + assertEquals(NOT_IMPLEMENTED.getCode(), ex.getCode()); + } + } + + private InputStream emptyBody() { + return null; + } + + private InputStream invalidXmlStructure() { + String xml = + "" + + " " + + " "; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream twoTags() { + String xml = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + " tag2" + + " val2" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream noTagSet() { + String xml = + "" + + ""; + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream emptyTags() { + String xml = + "" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + public InputStream tagKeyNotSpecified() { + String xml = + "" + + " " + + " " + + " val1" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + public InputStream tagValueNotSpecified() { + String xml = + "" + + " " + + " " + + " tag1" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } +} + diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index aecc56fe172..dbafa8c11cb 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -51,6 +51,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.spy; @@ -99,7 +100,7 @@ public void testPartUpload() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -121,7 +122,7 @@ public void testPartUploadWithOverride() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -130,7 +131,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. 
content = "Multipart Upload Changed"; response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -144,7 +145,7 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, - "random", body); + "random", null, null, body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -178,7 +179,7 @@ public void testPartUploadStreamContentLength() long contentLength = chunkedContent.length(); objectEndpoint.put(OzoneConsts.S3_BUCKET, keyName, contentLength, 1, - uploadID, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + uploadID, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertContentLength(uploadID, keyName, 15); } @@ -201,7 +202,7 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(OzoneConsts.S3_BUCKET, keyName, - contentLength, 1, uploadID, body); + contentLength, 1, uploadID, null, null, body); assertContentLength(uploadID, keyName, content.length()); } @@ -234,7 +235,7 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException try (MockedStatic mocked = mockStatic(IOUtils.class)) { // Add the mocked methods only during the copy request when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); - mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + mocked.when(() -> IOUtils.copy(any(InputStream.class), any(OutputStream.class), anyInt())) .thenThrow(IOException.class); String content = "Multipart Upload"; @@ -242,7 +243,7 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException new ByteArrayInputStream(content.getBytes(UTF_8)); try { objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 28ce32e7470..dc844f6463f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -94,7 +94,7 @@ public void testPartUpload() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -115,7 +115,7 @@ public void testPartUploadWithOverride() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); 
assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -124,7 +124,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -137,7 +137,7 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(S3BUCKET, S3KEY, content.length(), 1, - "random", body); + "random", null, null, body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index b74808de953..d256a346295 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -38,12 +38,14 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import static java.net.HttpURLConnection.HTTP_FORBIDDEN; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyLong; @@ -245,7 +247,7 @@ public void testGetKey() throws IOException { objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( - "bucketName", "keyPath", 0, null, 1000, "marker")); + "bucketName", "keyPath", 0, null, 1000, "marker", null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -261,7 +263,7 @@ public void testPutKey() throws IOException { objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "bucketName", "keyPath", 1024, 0, null, + "bucketName", "keyPath", 1024, 0, null, null, null, new ByteArrayInputStream(new byte[]{}))); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -277,7 +279,7 @@ public void testDeleteKey() throws IOException { objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath", null)); + objectEndpoint.delete("bucketName", "keyPath", null, null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -294,4 +296,44 @@ public void testMultiUploadKey() throws IOException { objectEndpoint.initializeMultipartUpload("bucketName", "keyPath")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } + + @Test + public void testObjectTagging() throws Exception { + when(objectStore.getVolume(anyString())).thenReturn(volume); + when(objectStore.getS3Volume()).thenReturn(volume); + when(objectStore.getS3Bucket(anyString())).thenReturn(bucket); + when(volume.getBucket("bucketName")).thenReturn(bucket); + when(bucket.getObjectTagging(anyString())).thenThrow(exception); + doThrow(exception).when(bucket).putObjectTagging(anyString(), anyMap()); + doThrow(exception).when(bucket).deleteObjectTagging(anyString()); + + 
ObjectEndpoint objectEndpoint = new ObjectEndpoint(); + objectEndpoint.setClient(client); + + String xml = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + ""; + + InputStream tagInput = new ByteArrayInputStream(xml.getBytes(UTF_8)); + + OS3Exception e = assertThrows(OS3Exception.class, () -> + objectEndpoint.put("bucketName", "keyPath", 0, 1, + null, "", null, tagInput)); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + + e = assertThrows(OS3Exception.class, () -> + objectEndpoint.delete("bucketName", "keyPath", "", "")); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + + e = assertThrows(OS3Exception.class, () -> + objectEndpoint.get("bucketName", "keyPath", 0, null, + 0, null, "")); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index d988b430230..1c0e115a24c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -106,7 +106,7 @@ public void testUpload() throws Exception { byte[] keyContent = S3_COPY_EXISTING_KEY_CONTENT.getBytes(UTF_8); ByteArrayInputStream body = new ByteArrayInputStream(keyContent); - Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, body); + Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null, null, body); assertEquals(200, response.getStatus()); } @@ -140,7 +140,7 @@ public void testUploadWithCopy() throws Exception { .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); REST.setHeaders(headers); - Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null); + Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null, null, null); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 947b0986c8e..1f6cee2c4a9 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -310,7 +310,7 @@ public void testCreateKeySuccess() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create the file keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, null, body); body.close(); long curMetric = metrics.getCreateKeySuccess(); assertEquals(1L, curMetric - oriMetric); @@ -322,7 +322,8 @@ public void testCreateKeyFailure() throws Exception { // Create the file in a bucket that does not exist OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - "unknownBucket", keyName, CONTENT.length(), 1, null, null)); + "unknownBucket", keyName, CONTENT.length(), 1, null, null, + null, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getCreateKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -334,7 +335,7 @@ public void testDeleteKeySuccess() throws Exception { long oriMetric = metrics.getDeleteKeySuccess(); bucket.createKey(keyName, 0).close(); - keyEndpoint.delete(bucketName, keyName, null); + keyEndpoint.delete(bucketName, keyName, null, null); long 
curMetric = metrics.getDeleteKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -343,7 +344,7 @@ public void testDeleteKeySuccess() throws Exception { public void testDeleteKeyFailure() throws Exception { long oriMetric = metrics.getDeleteKeyFailure(); OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( - "unknownBucket", keyName, null)); + "unknownBucket", keyName, null, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getDeleteKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -358,9 +359,9 @@ public void testGetKeySuccess() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create the file keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, null, body); // GET the key from the bucket - Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null); + Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null, null); StreamingOutput stream = (StreamingOutput) response.getEntity(); stream.write(new ByteArrayOutputStream()); long curMetric = metrics.getGetKeySuccess(); @@ -373,7 +374,7 @@ public void testGetKeyFailure() throws Exception { // Fetching a non-existent key OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( - bucketName, "unknownKey", 0, null, 0, null)); + bucketName, "unknownKey", 0, null, 0, null, null)); assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), e.getCode()); long curMetric = metrics.getGetKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -407,7 +408,7 @@ public void testAbortMultiPartUploadSuccess() throws Exception { long oriMetric = metrics.getAbortMultiPartUploadSuccess(); // Abort the Upload Successfully by deleting the key using the Upload-Id - keyEndpoint.delete(bucketName, keyName, uploadID); + keyEndpoint.delete(bucketName, keyName, uploadID, null); long curMetric = metrics.getAbortMultiPartUploadSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -419,7 +420,7 @@ public void testAbortMultiPartUploadFailure() throws Exception { // Fail the Abort Method by providing wrong uploadID OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( - bucketName, keyName, "wrongId")); + bucketName, keyName, "wrongId", null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getAbortMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); @@ -466,7 +467,7 @@ public void testCreateMultipartKeySuccess() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); keyEndpoint.put(bucketName, keyName, CONTENT.length(), - 1, uploadID, body); + 1, uploadID, null, null, body); long curMetric = metrics.getCreateMultipartKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -475,7 +476,7 @@ public void testCreateMultipartKeySuccess() throws Exception { public void testCreateMultipartKeyFailure() throws Exception { long oriMetric = metrics.getCreateMultipartKeyFailure(); OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, "randomId", null)); + bucketName, keyName, CONTENT.length(), 1, "randomId", null, null, null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getCreateMultipartKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -490,7 +491,7 @@ public void testListPartsSuccess() throws Exception { // Listing out the parts by providing the 
uploadID keyEndpoint.get(bucketName, keyName, 0, - uploadID, 3, null); + uploadID, 3, null, null); long curMetric = metrics.getListPartsSuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -501,7 +502,7 @@ public void testListPartsFailure() throws Exception { long oriMetric = metrics.getListPartsFailure(); // Listing out the parts by providing the uploadID after aborting OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( - bucketName, keyName, 0, "wrong_id", 3, null)); + bucketName, keyName, 0, "wrong_id", 3, null, null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getListPartsFailure(); assertEquals(1L, curMetric - oriMetric); @@ -522,14 +523,14 @@ public void testCopyObject() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); keyEndpoint.put(bucketName, keyName, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, null, body); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( bucketName + "/" + urlEncode(keyName)); keyEndpoint.put(destBucket, destKey, CONTENT.length(), 1, - null, body); + null, null, null, body); long curMetric = metrics.getCopyObjectSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -538,13 +539,114 @@ public void testCopyObject() throws Exception { // source and dest same when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body), + bucketName, keyName, CONTENT.length(), 1, null, null, null, body), "Test for CopyObjectMetric failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); curMetric = metrics.getCopyObjectFailure(); assertEquals(1L, curMetric - oriMetric); } + @Test + public void testPutObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getPutObjectTaggingSuccess(); + + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create the file + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + + long curMetric = metrics.getPutObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testPutObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getPutObjectTaggingFailure(); + + // Put object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.put(bucketName, "nonexistent", 0, 1, null, "", + null, getPutTaggingBody()) + ); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + + long curMetric = metrics.getPutObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testGetObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getGetObjectTaggingSuccess(); + + // Create the file + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + + // Get object tagging + keyEndpoint.get(bucketName, keyName, 0, + null, 0, null, ""); + + long curMetric = metrics.getGetObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + 
public void testGetObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getGetObjectTaggingFailure(); + + // Get object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.get(bucketName, "nonexistent", 0, null, + 0, null, "")); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + long curMetric = metrics.getGetObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testDeleteObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getDeleteObjectTaggingSuccess(); + + // Create the file + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + + // Delete object tagging + keyEndpoint.delete(bucketName, keyName, null, ""); + + long curMetric = metrics.getDeleteObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testDeleteObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getDeleteObjectTaggingFailure(); + + // Delete object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.delete(bucketName, "nonexistent", null, "")); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + long curMetric = metrics.getDeleteObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + private OzoneClient createClientWithKeys(String... keys) throws IOException { for (String key : keys) { bucket.createKey(key, 0).close(); @@ -567,4 +669,18 @@ private String initiateMultipartUpload(String bktName, String key) } return "Invalid-Id"; } + + private static InputStream getPutTaggingBody() { + String xml = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java index d1f81faddd2..b548d17d9ff 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -93,7 +92,7 @@ void testSecretGenerate() throws IOException { hasNoSecretYet(); S3SecretResponse response = - (S3SecretResponse) endpoint.generate().getEntity(); + (S3SecretResponse) endpoint.generate().getEntity(); assertEquals(USER_SECRET, response.getAwsSecret()); assertEquals(USER_NAME, response.getAwsAccessKey()); @@ -112,12 +111,11 @@ void testIfSecretAlreadyExists() throws IOException { } @Test - @Unhealthy("HDDS-11041") void testSecretGenerateWithUsername() throws IOException { hasNoSecretYet(); S3SecretResponse response = - (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity(); + (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity(); assertEquals(USER_SECRET, 
response.getAwsSecret()); assertEquals(OTHER_USER_NAME, response.getAwsAccessKey()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java index 85e6bd4c10e..b26df0e8996 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -98,7 +97,6 @@ void testSecretRevoke() throws IOException { } @Test - @Unhealthy("HDDS-11041") void testSecretRevokeWithUsername() throws IOException { endpoint.revoke(OTHER_USER_NAME); verify(objectStore, times(1)) diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 04c1c8602cb..8ea8ded01ce 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -20,14 +20,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-tools - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Tools Apache Ozone Tools jar + + false + + org.apache.ozone @@ -160,11 +164,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis ratis-tools + + org.apache.ratis + ratis-shell + info.picocli picocli + + info.picocli + picocli-shell-jline3 + + + org.jline + jline + jakarta.xml.bind jakarta.xml.bind-api @@ -267,21 +283,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-compiler-plugin - - org.apache.ozone - hdds-config - ${hdds.version} - org.kohsuke.metainf-services metainf-services ${metainf-services.version} + + info.picocli + picocli-codegen + ${picocli.version} + - org.apache.hadoop.hdds.conf.ConfigFileGenerator org.kohsuke.metainf_services.AnnotationProcessorImpl + picocli.codegen.aot.graalvm.processor.NativeImageConfigGeneratorProcessor + + -Aproject=${project.groupId}/${project.artifactId} + @@ -295,8 +314,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. 
- org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator + org.apache.hadoop.hdds.conf.Config + org.apache.hadoop.hdds.conf.ConfigGroup org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java index 991099f2702..ef9be49abfb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java @@ -18,10 +18,9 @@ package org.apache.hadoop.ozone.admin.nssummary; import org.apache.hadoop.fs.ozone.OzoneClientUtils; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.http.HttpConfig; @@ -62,29 +61,15 @@ QuotaUsageSubCommand.class, FileSizeDistSubCommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class NSSummaryAdmin extends GenericCli implements SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class NSSummaryAdmin implements AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - public OzoneAdmin getParent() { return parent; } - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } - private boolean isObjectStoreBucket(OzoneBucket bucket, ObjectStore objectStore) { boolean enableFileSystemPaths = getOzoneConfig() .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, @@ -108,7 +93,6 @@ private boolean isObjectStoreBucket(OzoneBucket bucket, ObjectStore objectStore) * Returns false if bucket is part of path but not a OBS bucket. * @param path * @return true if bucket is OBS bucket or not part of provided path. 
- * @throws IOException */ public boolean isNotValidBucketOrOBSBucket(String path) { OFSPath ofsPath = new OFSPath(path, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/CancelPrepareSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/CancelPrepareSubCommand.java index 8a159adb644..ca6fa428fe2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/CancelPrepareSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/CancelPrepareSubCommand.java @@ -38,7 +38,7 @@ public class CancelPrepareSubCommand implements Callable { @CommandLine.Option( names = {"-id", "--service-id"}, description = "Ozone Manager Service ID", - required = true + required = false ) private String omServiceId; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/GetServiceRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/GetServiceRolesSubcommand.java index 2a25dfbd103..2b23ad9f536 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/GetServiceRolesSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/GetServiceRolesSubcommand.java @@ -18,17 +18,19 @@ package org.apache.hadoop.ozone.admin.om; +import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.JsonUtils; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; +import org.apache.hadoop.ozone.utils.FormattingCLIUtils; import picocli.CommandLine; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -57,19 +59,40 @@ public class GetServiceRolesSubcommand implements Callable { description = "Format output as JSON") private boolean json; + @CommandLine.Option(names = { "--table" }, + defaultValue = "false", + description = "Format output as Table") + private boolean table; + private OzoneManagerProtocol ozoneManagerClient; + private static final String OM_ROLES_TITLE = "Ozone Manager Roles"; + + private static final List OM_ROLES_HEADER = Arrays.asList( + "Host Name", "Node ID", "Role"); + @Override public Void call() throws Exception { try { ozoneManagerClient = parent.createOmClient(omServiceId); if (json) { printOmServerRolesAsJson(ozoneManagerClient.getServiceList()); + } else if (table) { + FormattingCLIUtils formattingCLIUtils = new FormattingCLIUtils(OM_ROLES_TITLE) + .addHeaders(OM_ROLES_HEADER); + List serviceList = ozoneManagerClient.getServiceList(); + for (ServiceInfo serviceInfo : serviceList) { + OMRoleInfo omRoleInfo = serviceInfo.getOmRoleInfo(); + if (omRoleInfo != null && + serviceInfo.getNodeType() == HddsProtos.NodeType.OM) { + formattingCLIUtils.addLine(new String[]{serviceInfo.getHostname(), + omRoleInfo.getNodeId(), omRoleInfo.getServerRole()}); + } + } + System.out.println(formattingCLIUtils.render()); } else { printOmServerRoles(ozoneManagerClient.getServiceList()); } - } catch (OzoneClientException ex) { - System.out.printf("Error: %s", ex.getMessage()); } finally { if (ozoneManagerClient != null) { ozoneManagerClient.close(); @@ -110,4 +133,14 @@ 
private void printOmServerRolesAsJson(List serviceList) System.out.print( JsonUtils.toJsonStringWithDefaultPrettyPrinter(omServiceList)); } + + @VisibleForTesting + public void setOzoneManagerClient(OzoneManagerProtocol ozoneManagerClient) { + this.ozoneManagerClient = ozoneManagerClient; + } + + @VisibleForTesting + public void setParent(OMAdmin parent) { + this.parent = parent; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java index 3162c556354..9076ce9bf7d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.ozone.admin.om; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -38,8 +37,6 @@ import org.apache.ratis.protocol.ClientId; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; import java.util.Collection; @@ -63,25 +60,16 @@ TransferOmLeaderSubCommand.class, FetchKeySubCommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class OMAdmin extends GenericCli implements SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class OMAdmin implements AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; - @Spec - private CommandSpec spec; - public OzoneAdmin getParent() { return parent; } - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } - public ClientProtocol createClient(String omServiceId) throws Exception { OzoneConfiguration conf = parent.getOzoneConf(); if (OmUtils.isOmHAServiceId(conf, omServiceId)) { @@ -146,9 +134,4 @@ private Collection getConfiguredServiceIds() { conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); return omServiceIds; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/UpdateRangerSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/UpdateRangerSubcommand.java index 4234ee29d12..d2d2f1bf044 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/UpdateRangerSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/UpdateRangerSubcommand.java @@ -20,7 +20,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import picocli.CommandLine; @@ -86,9 +85,6 @@ public Void call() throws Exception { System.err.println("Operation completed with errors. 
" + "Check OM log for details"); } - - } catch (OzoneClientException ex) { - System.err.printf("Error: %s", ex.getMessage()); } return null; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java index 0a2666d30ee..b8ea45898d7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java @@ -28,6 +28,7 @@ /** * An abstract Class use to ReconfigureSubCommand. */ +@CommandLine.Command public abstract class AbstractReconfigureSubCommand implements Callable { @CommandLine.ParentCommand private ReconfigureCommands parent; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java index fc171e52d8d..d14102c4e8a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.ozone.admin.reconfig; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -47,9 +47,8 @@ ReconfigureStatusSubcommand.class, ReconfigurePropertiesSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ReconfigureCommands implements Callable, - SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ReconfigureCommands implements Callable, AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; @@ -87,11 +86,6 @@ public HddsProtos.NodeType getService() { return HddsProtos.NodeType.valueOf(service); } - @Override - public Class getParentType() { - return OzoneAdmin.class; - } - public boolean isBatchReconfigDatanodes() { return batchReconfigDatanodes; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java index 480133e59b4..da74083de3b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.admin.scm; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -27,6 +28,7 @@ import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.server.JsonUtils; +import org.apache.hadoop.ozone.utils.FormattingCLIUtils; import picocli.CommandLine; import static java.lang.System.err; @@ -50,13 +52,44 @@ public class GetScmRatisRolesSubcommand extends ScmSubcommand { description = "Format output as JSON") private 
boolean json; + @CommandLine.Option(names = { "--table" }, + defaultValue = "false", + description = "Format output as Table") + private boolean table; + + private static final String SCM_ROLES_TITLE = "Storage Container Manager Roles"; + + private static final List RATIS_SCM_ROLES_HEADER = Arrays.asList( + "Host Name", "Ratis Port", "Role", "Node ID", "Host Address"); + + private static final List STANDALONE_SCM_ROLES_HEADER = Arrays.asList("Host Name", "Port"); + @Override - protected void execute(ScmClient scmClient) throws IOException { + public void execute(ScmClient scmClient) throws IOException { List ratisRoles = scmClient.getScmRatisRoles(); + boolean isRatisEnabled = scmClient.isScmRatisEnable(); if (json) { Map> scmRoles = parseScmRoles(ratisRoles); System.out.print( JsonUtils.toJsonStringWithDefaultPrettyPrinter(scmRoles)); + } else if (table) { + FormattingCLIUtils formattingCLIUtils = new FormattingCLIUtils(SCM_ROLES_TITLE); + + // Determine which header to use based on whether Ratis is enabled or not. + if (isRatisEnabled) { + formattingCLIUtils.addHeaders(RATIS_SCM_ROLES_HEADER); + } else { + formattingCLIUtils.addHeaders(STANDALONE_SCM_ROLES_HEADER); + } + + for (String role : ratisRoles) { + String[] roleItems = role.split(":"); + if (roleItems.length < 2) { + err.println("Invalid response received for ScmRatisRoles."); + } + formattingCLIUtils.addLine(roleItems); + } + System.out.println(formattingCLIUtils.render()); } else { for (String role: ratisRoles) { System.out.println(role); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java index 98eba154b25..996485b13fd 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java @@ -17,14 +17,11 @@ */ package org.apache.hadoop.ozone.admin.scm; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand for admin operations related to SCM. 
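Both roles subcommands above share the same table-rendering flow added in this patch: construct a FormattingCLIUtils with a title, attach a header row, add one line per service, and print render(). The following is a minimal standalone sketch of that flow, assuming only the constructor and the fluent addHeaders/addLine/render calls visible in this diff; the host names and roles are hypothetical, and in the real subcommands the rows come from getServiceList() or getScmRatisRoles().

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.ozone.utils.FormattingCLIUtils;

public final class RolesTableExample {
  public static void main(String[] args) {
    // Title and headers mirror OM_ROLES_TITLE / OM_ROLES_HEADER used by the --table option.
    List<String> header = Arrays.asList("Host Name", "Node ID", "Role");
    FormattingCLIUtils table = new FormattingCLIUtils("Ozone Manager Roles")
        .addHeaders(header);
    // Hypothetical rows for illustration only.
    table.addLine(new String[] {"om1.example.com", "om1", "LEADER"});
    table.addLine(new String[] {"om2.example.com", "om2", "FOLLOWER"});
    System.out.println(table.render());
  }
}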
@@ -43,28 +40,13 @@ DecommissionScmSubcommand.class, RotateKeySubCommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ScmAdmin extends GenericCli implements SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ScmAdmin implements AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; - @Spec - private CommandSpec spec; - public OzoneAdmin getParent() { return parent; } - - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java index 18c13d67cf2..175fc03e398 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java @@ -18,9 +18,8 @@ package org.apache.hadoop.ozone.debug; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.Handler; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.kohsuke.MetaInfServices; @@ -36,9 +35,9 @@ name = "print-log-dag", aliases = "pld", description = "Create an image of the current compaction log DAG in OM.") -@MetaInfServices(SubcommandWithParent.class) +@MetaInfServices(DebugSubcommand.class) public class CompactionLogDagPrinter extends Handler - implements SubcommandWithParent { + implements DebugSubcommand { @CommandLine.Option(names = {"-f", "--file-name-prefix"}, description = "Prefix to be use in image file name. 
(optional)") @@ -56,14 +55,9 @@ public class CompactionLogDagPrinter extends Handler defaultValue = "file_name") private String graphType; - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String message = client.getObjectStore() .printCompactionLogDag(fileNamePrefix, graphType); System.out.println(message); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java index a163cda2502..ca79aa41fa4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java @@ -20,12 +20,16 @@ import java.nio.file.Path; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaOneDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition; @@ -48,17 +52,15 @@ public final class DBDefinitionFactory { private DBDefinitionFactory() { } - private static HashMap dbMap; - - private static String dnDBSchemaVersion; + private static final AtomicReference DATANODE_DB_SCHEMA_VERSION = new AtomicReference<>(); + private static final Map DB_MAP; static { - dbMap = new HashMap<>(); - Arrays.asList( - new SCMDBDefinition(), - new OMDBDefinition(), - new ReconSCMDBDefinition() - ).forEach(dbDefinition -> dbMap.put(dbDefinition.getName(), dbDefinition)); + final Map map = new HashMap<>(); + Arrays.asList(SCMDBDefinition.get(), OMDBDefinition.get(), ReconSCMDBDefinition.get(), + WitnessedContainerDBDefinition.get()) + .forEach(dbDefinition -> map.put(dbDefinition.getName(), dbDefinition)); + DB_MAP = Collections.unmodifiableMap(map); } public static DBDefinition getDefinition(String dbName) { @@ -66,10 +68,8 @@ public static DBDefinition getDefinition(String dbName) { if (!dbName.equals(OM_DB_NAME) && dbName.startsWith(OM_DB_NAME)) { dbName = OM_DB_NAME; } - if (dbMap.containsKey(dbName)) { - return dbMap.get(dbName); - } - return getReconDBDefinition(dbName); + final DBDefinition definition = DB_MAP.get(dbName); + return definition != null ? 
definition : getReconDBDefinition(dbName); } public static DBDefinition getDefinition(Path dbPath, @@ -83,7 +83,7 @@ public static DBDefinition getDefinition(Path dbPath, } String dbName = fileName.toString(); if (dbName.endsWith(OzoneConsts.CONTAINER_DB_SUFFIX)) { - switch (dnDBSchemaVersion) { + switch (DATANODE_DB_SCHEMA_VERSION.get()) { case "V1": return new DatanodeSchemaOneDBDefinition( dbPath.toAbsolutePath().toString(), config); @@ -102,12 +102,12 @@ private static DBDefinition getReconDBDefinition(String dbName) { if (dbName.startsWith(RECON_CONTAINER_KEY_DB)) { return new ReconDBDefinition(dbName); } else if (dbName.startsWith(RECON_OM_SNAPSHOT_DB)) { - return new OMDBDefinition(); + return OMDBDefinition.get(); } return null; } public static void setDnDBSchemaVersion(String dnDBSchemaVersion) { - DBDefinitionFactory.dnDBSchemaVersion = dnDBSchemaVersion; + DATANODE_DB_SCHEMA_VERSION.set(dnDBSchemaVersion); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java index 0c7ba187ce1..21b572fbc4c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.debug; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -74,11 +74,8 @@ @CommandLine.Command(name = "find-missing-padding", aliases = { "fmp" }, description = "List all keys with any missing padding, optionally limited to a volume/bucket/key URI.") -@MetaInfServices(SubcommandWithParent.class) -public class FindMissingPadding extends Handler implements SubcommandWithParent { - - @CommandLine.ParentCommand - private OzoneDebug parent; +@MetaInfServices(DebugSubcommand.class) +public class FindMissingPadding extends Handler implements DebugSubcommand { @CommandLine.Mixin private ScmOption scmOption; @@ -100,11 +97,6 @@ protected OzoneAddress getAddress() throws OzoneClientException { return new OzoneAddress(uri); } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override protected void execute(OzoneClient ozoneClient, OzoneAddress address) throws IOException { findCandidateKeys(ozoneClient, address); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java index a8891404e05..9c3865ae241 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.LeaseRecoverable; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.kohsuke.MetaInfServices; @@ -40,11 +40,8 @@ customSynopsis = "ozone debug recover --path=", description = "recover the lease of a specified file. 
Make sure to specify " + "file system scheme if ofs:// is not the default.") -@MetaInfServices(SubcommandWithParent.class) -public class LeaseRecoverer implements Callable, SubcommandWithParent { - - @CommandLine.ParentCommand - private OzoneDebug parent; +@MetaInfServices(DebugSubcommand.class) +public class LeaseRecoverer implements Callable, DebugSubcommand { @Spec private CommandSpec spec; @@ -62,11 +59,6 @@ public void setPath(String dbPath) { this.path = dbPath; } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override public Void call() throws Exception { OzoneConfiguration configuration = new OzoneConfiguration(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java index 3d6cf570934..164d07f96b4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java @@ -18,11 +18,11 @@ package org.apache.hadoop.ozone.debug; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.cli.DebugSubcommand; +import org.apache.hadoop.hdds.cli.ExtensibleParentCommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import picocli.CommandLine; /** @@ -32,35 +32,14 @@ description = "Developer tools for Ozone Debug operations", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneDebug extends GenericCli { +public class OzoneDebug extends GenericCli implements ExtensibleParentCommand { - private OzoneConfiguration ozoneConf; - - public OzoneDebug() { - super(OzoneDebug.class); - } - - @VisibleForTesting - public OzoneDebug(OzoneConfiguration configuration) { - super(OzoneDebug.class); - this.ozoneConf = configuration; - } - - public OzoneConfiguration getOzoneConf() { - if (ozoneConf == null) { - ozoneConf = createOzoneConfiguration(); - } - return ozoneConf; + public static void main(String[] argv) { + new OzoneDebug().run(argv); } - /** - * Main for the Ozone Debug shell Command handling. 
- * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { - - new OzoneDebug().run(argv); + @Override + public Class subcommandType() { + return DebugSubcommand.class; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java index fabc7f456ae..cdda3e5e0f9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java @@ -26,7 +26,7 @@ import java.util.concurrent.Callable; import java.nio.file.Path; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; @@ -53,8 +53,8 @@ @CommandLine.Command( name = "prefix", description = "Parse prefix contents") -@MetaInfServices(SubcommandWithParent.class) -public class PrefixParser implements Callable, SubcommandWithParent { +@MetaInfServices(DebugSubcommand.class) +public class PrefixParser implements Callable, DebugSubcommand { /** * Types to represent the level or path component type. @@ -101,11 +101,6 @@ public void setDbPath(String dbPath) { this.dbPath = dbPath; } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override public Void call() throws Exception { parse(volume, bucket, dbPath, filePath); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java index 48ed7c74ae7..c88245a571b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -17,14 +17,13 @@ package org.apache.hadoop.ozone.debug; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -61,8 +60,8 @@ @CommandLine.Command(name = "read-replicas", description = "Reads every replica for all the blocks associated with a " + "given key.") -@MetaInfServices(SubcommandWithParent.class) -public class ReadReplicas extends KeyHandler implements SubcommandWithParent { +@MetaInfServices(DebugSubcommand.class) +public class ReadReplicas extends KeyHandler implements DebugSubcommand { @CommandLine.Option(names = {"--outputDir", "-o", "--output-dir"}, description = "Destination where the directory will be created" + @@ -83,14 +82,9 @@ public class ReadReplicas extends KeyHandler implements SubcommandWithParent { private static final String JSON_PROPERTY_REPLICA_UUID = "uuid"; private static final String JSON_PROPERTY_REPLICA_EXCEPTION = "exception"; - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override protected void execute(OzoneClient 
client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { address.ensureKeyAddress(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java new file mode 100644 index 00000000000..54b2e4c9986 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.debug; + +import com.google.common.collect.ImmutableSortedMap; +import org.apache.hadoop.hdds.ComponentVersion; +import org.apache.hadoop.hdds.DatanodeVersion; +import org.apache.hadoop.hdds.cli.DebugSubcommand; +import org.apache.hadoop.hdds.server.JsonUtils; +import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.OzoneManagerVersion; +import org.apache.hadoop.ozone.util.OzoneVersionInfo; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Callable; + +/** Show internal component version information as JSON. */ +@CommandLine.Command( + name = "version", + description = "Show internal version of Ozone components, as defined in the artifacts where this command is " + + "executed. It does not communicate with any Ozone services. Run the same command on different nodes to " + + "get a cross-component view of versions. The goal of this command is to help quickly get a glance of the " + + "latest features supported by Ozone on the current node." 
+) +@MetaInfServices(DebugSubcommand.class) +public class VersionDebug implements Callable, DebugSubcommand { + + @Override + public Void call() throws IOException { + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(ImmutableSortedMap.of( + "ozone", ImmutableSortedMap.of( + "revision", OzoneVersionInfo.OZONE_VERSION_INFO.getRevision(), + "url", OzoneVersionInfo.OZONE_VERSION_INFO.getUrl(), + "version", OzoneVersionInfo.OZONE_VERSION_INFO.getVersion() + ), + "components", ImmutableSortedMap.of( + "client", asMap(ClientVersion.CURRENT), + "datanode", asMap(DatanodeVersion.CURRENT), + "om", asMap(OzoneManagerVersion.CURRENT) + ) + ))); + return null; + } + + private static & ComponentVersion> Map asMap(T version) { + return ImmutableSortedMap.of( + "componentVersion", ImmutableSortedMap.of( + "name", version.name(), + "protoValue", version.toProtoValue() + ) + ); + } + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDataNodeDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDataNodeDetails.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java index 6019e5806dd..cf6b7d7a11d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDataNodeDetails.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; /** * Class that gives datanode details on which the chunk is present. */ diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDetails.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java index 278c2bf055c..4e2b5314a06 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDetails.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; /** * Class that gives chunkDetails. diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java index b5b2364007f..6944c380493 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; import java.io.File; import java.io.IOException; @@ -27,7 +27,7 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -41,8 +41,8 @@ import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.debug.OzoneDebug; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -61,9 +61,9 @@ @Command(name = "chunkinfo", description = "returns chunk location" + " information about an existing key") -@MetaInfServices(SubcommandWithParent.class) +@MetaInfServices(DebugSubcommand.class) public class ChunkKeyHandler extends KeyHandler implements - SubcommandWithParent { + DebugSubcommand { @CommandLine.ParentCommand private OzoneDebug parent; @@ -74,7 +74,7 @@ private String getChunkLocationPath(String containerLocation) { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { try (ContainerOperationClient containerOperationClient = new ContainerOperationClient(parent.getOzoneConf()); XceiverClientManager xceiverClientManager = containerOperationClient.getXceiverClientManager()) { OzoneManagerProtocol ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient(); @@ -201,10 +201,4 @@ private boolean isECParityBlock(Pipeline pipeline, DatanodeDetails dn) { return pipeline.getReplicaIndex(dn) > ((ECReplicationConfig) pipeline.getReplicationConfig()).getData(); } - - @Override - public Class getParentType() { - return OzoneDebug.class; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkType.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java similarity index 95% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkType.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java index 610eab54d6f..3af7f810402 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkType.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; /** * The type of chunks of an Erasure Coded key. 
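The debug-side conversions above all follow the same pattern: each subcommand drops the getParentType() override and the SubcommandWithParent interface, implements the DebugSubcommand marker instead, and registers itself through @MetaInfServices so that OzoneDebug (now an ExtensibleParentCommand whose subcommandType() is DebugSubcommand.class) discovers it at runtime. A minimal sketch of a brand-new debug subcommand under this scheme; the command name and body are invented for illustration, only the wiring mirrors the pattern in this patch.

import java.util.concurrent.Callable;

import org.apache.hadoop.hdds.cli.DebugSubcommand;
import org.kohsuke.MetaInfServices;
import picocli.CommandLine;

/** Hypothetical subcommand; discovery happens via the generated META-INF/services entry, no parent wiring needed. */
@CommandLine.Command(name = "example", description = "Illustrative debug subcommand.")
@MetaInfServices(DebugSubcommand.class)
public class ExampleDebugSubcommand implements Callable<Void>, DebugSubcommand {

  @Override
  public Void call() {
    // Would be invoked as: ozone debug example
    System.out.println("discovered and executed via DebugSubcommand registration");
    return null;
  }
}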
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java similarity index 98% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java index 130c1bca0fc..1c5fc090b0e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; import com.fasterxml.jackson.annotation.JsonInclude; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java new file mode 100644 index 00000000000..d81f2276a65 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Command to debug chunk information. 
+ */ +package org.apache.hadoop.ozone.debug.chunk; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java index a0aba2a1b15..fae1189d689 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -82,8 +82,8 @@ ExportSubcommand.class, InspectSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ContainerCommands implements Callable, SubcommandWithParent { +@MetaInfServices(DebugSubcommand.class) +public class ContainerCommands implements Callable, DebugSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ContainerCommands.class); @@ -104,11 +104,6 @@ public Void call() throws Exception { return null; } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - OzoneConfiguration getOzoneConf() { return parent.getOzoneConf(); } @@ -116,7 +111,7 @@ OzoneConfiguration getOzoneConf() { public void loadContainersFromVolumes() throws IOException { OzoneConfiguration conf = parent.getOzoneConf(); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = new ContainerSet(null, 1000, true); ContainerMetrics metrics = ContainerMetrics.create(conf); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java similarity index 59% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java index 0c38fbe33ba..6fbbd1a3083 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; import com.fasterxml.jackson.annotation.JsonAutoDetect; import com.fasterxml.jackson.annotation.JsonInclude; @@ -27,7 +27,6 @@ import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -44,7 +43,9 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; -import org.kohsuke.MetaInfServices; +import org.apache.hadoop.ozone.debug.DBDefinitionFactory; +import org.apache.hadoop.ozone.debug.RocksDBUtils; +import org.apache.hadoop.ozone.utils.Filter; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -53,11 +54,14 @@ import picocli.CommandLine; import java.io.BufferedWriter; +import java.io.File; import java.io.IOException; import java.io.PrintWriter; +import java.lang.reflect.Field; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -71,6 +75,8 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static java.nio.charset.StandardCharsets.UTF_8; @@ -81,8 +87,7 @@ name = "scan", description = "Parse specified metadataTable" ) -@MetaInfServices(SubcommandWithParent.class) -public class DBScanner implements Callable, SubcommandWithParent { +public class DBScanner implements Callable { public static final Logger LOG = LoggerFactory.getLogger(DBScanner.class); private static final String SCHEMA_V3 = "V3"; @@ -101,7 +106,7 @@ public class DBScanner implements Callable, SubcommandWithParent { @CommandLine.Option(names = {"--with-keys"}, description = "Print a JSON object of key->value pairs (default)" + " instead of a JSON array of only values.", - defaultValue = "true") + defaultValue = "true", fallbackValue = "true") private boolean withKey; @CommandLine.Option(names = {"--length", "--limit", "-l"}, @@ -121,6 +126,21 @@ public class DBScanner implements Callable, SubcommandWithParent { description = "Key at which iteration of the DB ends") private String endKey; + @CommandLine.Option(names = {"--fields"}, + description = "Comma-separated list of fields needed for each value. " + + "eg.) \"name,acls.type\" for showing name and type under acls.") + private String fieldsFilter; + + @CommandLine.Option(names = {"--filter"}, + description = "Comma-separated list of \"::\" where " + + " is any valid field of the record, " + + " is [EQUALS,LESSER, GREATER or REGEX]. (EQUALS compares the exact string, " + + "REGEX compares with a valid regular expression passed, and LESSER/GREATER works with numeric values), " + + " is the value of the field. \n" + + "eg.) 
\"dataSize:equals:1000\" for showing records having the value 1000 for dataSize, \n" + + " \"keyName:regex:^key.*$\" for showing records having keyName that matches the given regex.") + private String filter; + @CommandLine.Option(names = {"--dnSchema", "--dn-schema", "-d"}, description = "Datanode DB Schema Version: V1/V2/V3", defaultValue = "V3") @@ -152,6 +172,14 @@ public class DBScanner implements Callable, SubcommandWithParent { defaultValue = "10") private int threadCount; + @CommandLine.Option(names = {"--max-records-per-file"}, + description = "The number of records to print per file.", + defaultValue = "0") + private long recordsPerFile; + + private int fileSuffix = 0; + private long globalCount = 0; + private static final String KEY_SEPARATOR_SCHEMA_V3 = new OzoneConfiguration().getObject(DatanodeConfiguration.class) .getContainerSchemaV3KeySeparator(); @@ -160,7 +188,8 @@ public class DBScanner implements Callable, SubcommandWithParent { @Override public Void call() throws Exception { - + fileSuffix = 0; + globalCount = 0; List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath()); final List cfHandleList = new ArrayList<>(); @@ -220,11 +249,29 @@ private boolean displayTable(ManagedRocksIterator iterator, return displayTable(iterator, dbColumnFamilyDef, out(), schemaV3); } + // If there are no parent directories, create them + File file = new File(fileName); + File parentFile = file.getParentFile(); + if (!parentFile.exists()) { + boolean flg = parentFile.mkdirs(); + if (!flg) { + throw new IOException("An exception occurred while creating " + + "the directory. Directorys: " + parentFile.getAbsolutePath()); + } + } + // Write to file output - try (PrintWriter out = new PrintWriter(new BufferedWriter( - new PrintWriter(fileName, UTF_8.name())))) { - return displayTable(iterator, dbColumnFamilyDef, out, schemaV3); + while (iterator.get().isValid() && withinLimit(globalCount)) { + String fileNameTarget = recordsPerFile > 0 ? fileName + "." 
+ fileSuffix++ : + fileName; + try (PrintWriter out = new PrintWriter(new BufferedWriter( + new PrintWriter(fileNameTarget, UTF_8.name())))) { + if (!displayTable(iterator, dbColumnFamilyDef, out, schemaV3)) { + return false; + } + } } + return true; } private boolean displayTable(ManagedRocksIterator iterator, @@ -245,7 +292,7 @@ private boolean displayTable(ManagedRocksIterator iterator, logWriter.start(); processRecords(iterator, dbColumnFamilyDef, logWriter, threadPool, schemaV3); - } catch (InterruptedException e) { + } catch (InterruptedException | IOException e) { exception = true; Thread.currentThread().interrupt(); } finally { @@ -261,7 +308,7 @@ private boolean displayTable(ManagedRocksIterator iterator, private void processRecords(ManagedRocksIterator iterator, DBColumnFamilyDefinition dbColumnFamilyDef, LogWriter logWriter, ExecutorService threadPool, - boolean schemaV3) throws InterruptedException { + boolean schemaV3) throws InterruptedException, IOException { if (startKey != null) { iterator.get().seek(getValueObject(dbColumnFamilyDef, startKey)); } @@ -273,33 +320,64 @@ private void processRecords(ManagedRocksIterator iterator, long count = 0; List> futures = new ArrayList<>(); boolean reachedEnd = false; - while (withinLimit(count) && iterator.get().isValid() && !exception && !reachedEnd) { + + Map fieldsFilterSplitMap = new HashMap<>(); + if (filter != null) { + for (String field : filter.split(",")) { + String[] fieldValue = field.split(":"); + if (fieldValue.length != 3) { + err().println("Error: Invalid format for filter \"" + field + + "\". Usage: ::. Ignoring filter passed"); + } else { + Filter filterValue = new Filter(fieldValue[1], fieldValue[2]); + if (filterValue.getOperator() == null) { + err().println("Error: Invalid operator for filter \"" + filterValue + + "\". can be one of [EQUALS,LESSER,GREATER]. Ignoring filter passed"); + } else { + String[] subfields = fieldValue[0].split("\\."); + getFilterSplit(Arrays.asList(subfields), fieldsFilterSplitMap, filterValue); + } + } + } + } + + while (withinLimit(globalCount) && iterator.get().isValid() && !exception && !reachedEnd) { // if invalid endKey is given, it is ignored if (null != endKey && Arrays.equals(iterator.get().key(), getValueObject(dbColumnFamilyDef, endKey))) { reachedEnd = true; } - batch.add(new ByteArrayKeyValue( - iterator.get().key(), iterator.get().value())); - iterator.get().next(); - count++; - if (batch.size() >= batchSize) { - while (logWriter.getInflightLogCount() > threadCount * 10L - && !exception) { - // Prevents too many unfinished Tasks from - // consuming too much memory. - Thread.sleep(100); + + Object o = dbColumnFamilyDef.getValueCodec().fromPersistedFormat(iterator.get().value()); + if (filter == null || + checkFilteredObject(o, dbColumnFamilyDef.getValueType(), fieldsFilterSplitMap)) { + // the record passes the filter + batch.add(new ByteArrayKeyValue( + iterator.get().key(), iterator.get().value())); + globalCount++; + count++; + if (batch.size() >= batchSize) { + while (logWriter.getInflightLogCount() > threadCount * 10L + && !exception) { + // Prevents too many unfinished Tasks from + // consuming too much memory. 
+ Thread.sleep(100); + } + Future future = threadPool.submit( + new Task(dbColumnFamilyDef, batch, logWriter, sequenceId, + withKey, schemaV3, fieldsFilter)); + futures.add(future); + batch = new ArrayList<>(batchSize); + sequenceId++; } - Future future = threadPool.submit( - new Task(dbColumnFamilyDef, batch, logWriter, sequenceId, - withKey, schemaV3)); - futures.add(future); - batch = new ArrayList<>(batchSize); - sequenceId++; + } + iterator.get().next(); + if ((recordsPerFile > 0) && (count >= recordsPerFile)) { + break; } } if (!batch.isEmpty()) { Future future = threadPool.submit(new Task(dbColumnFamilyDef, - batch, logWriter, sequenceId, withKey, schemaV3)); + batch, logWriter, sequenceId, withKey, schemaV3, fieldsFilter)); futures.add(future); } @@ -312,6 +390,158 @@ private void processRecords(ManagedRocksIterator iterator, } } + private void getFilterSplit(List fields, Map fieldMap, Filter leafValue) throws IOException { + int len = fields.size(); + if (len == 1) { + Filter currentValue = fieldMap.get(fields.get(0)); + if (currentValue != null) { + err().println("Cannot pass multiple values for the same field and " + + "cannot have filter for both parent and child"); + throw new IOException("Invalid filter passed"); + } + fieldMap.put(fields.get(0), leafValue); + } else { + Filter fieldMapGet = fieldMap.computeIfAbsent(fields.get(0), k -> new Filter()); + if (fieldMapGet.getValue() != null) { + err().println("Cannot pass multiple values for the same field and " + + "cannot have filter for both parent and child"); + throw new IOException("Invalid filter passed"); + } + Map nextLevel = fieldMapGet.getNextLevel(); + if (nextLevel == null) { + fieldMapGet.setNextLevel(new HashMap<>()); + } + getFilterSplit(fields.subList(1, len), fieldMapGet.getNextLevel(), leafValue); + } + } + + private boolean checkFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) { + for (Map.Entry field : fieldsSplitMap.entrySet()) { + try { + Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey()); + Object valueObject = valueClassField.get(obj); + Filter fieldValue = field.getValue(); + + if (valueObject == null) { + // there is no such field in the record. This filter will be ignored for the current record. + continue; + } + if (fieldValue == null) { + err().println("Malformed filter. 
Check input"); + throw new IOException("Invalid filter passed"); + } else if (fieldValue.getNextLevel() == null) { + // reached the end of fields hierarchy, check if they match the filter + try { + switch (fieldValue.getOperator()) { + case EQUALS: + if (!String.valueOf(valueObject).equals(fieldValue.getValue())) { + return false; + } + break; + case GREATER: + if (Double.parseDouble(String.valueOf(valueObject)) + < Double.parseDouble(String.valueOf(fieldValue.getValue()))) { + return false; + } + break; + case LESSER: + if (Double.parseDouble(String.valueOf(valueObject)) + > Double.parseDouble(String.valueOf(fieldValue.getValue()))) { + return false; + } + break; + case REGEX: + Pattern p = Pattern.compile(String.valueOf(fieldValue.getValue())); + Matcher m = p.matcher(String.valueOf(valueObject)); + if (!m.find()) { + return false; + } + break; + default: + err().println("Only EQUALS/LESSER/GREATER/REGEX operator is supported currently."); + throw new IOException("Invalid filter passed"); + } + } catch (NumberFormatException ex) { + err().println("LESSER or GREATER operation can be performed only on numeric values."); + throw new IOException("Invalid filter passed"); + } + } else { + Map subfields = fieldValue.getNextLevel(); + if (Collection.class.isAssignableFrom(valueObject.getClass())) { + if (!checkFilteredObjectCollection((Collection) valueObject, subfields)) { + return false; + } + } else if (Map.class.isAssignableFrom(valueObject.getClass())) { + Map valueObjectMap = (Map) valueObject; + boolean flag = false; + for (Map.Entry ob : valueObjectMap.entrySet()) { + boolean subflag; + if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { + subflag = checkFilteredObjectCollection((Collection)ob.getValue(), subfields); + } else { + subflag = checkFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); + } + if (subflag) { + // atleast one item in the map/list of the record has matched the filter, + // so record passes the filter. 
+ flag = true; + break; + } + } + if (!flag) { + // none of the items in the map/list passed the filter => record doesn't pass the filter + return false; + } + } else { + if (!checkFilteredObject(valueObject, valueClassField.getType(), subfields)) { + return false; + } + } + } + } catch (NoSuchFieldException ex) { + err().println("ERROR: no such field: " + field); + exception = true; + return false; + } catch (IllegalAccessException e) { + err().println("ERROR: Cannot get field \"" + field + "\" from record."); + exception = true; + return false; + } catch (Exception ex) { + err().println("ERROR: field: " + field + ", ex: " + ex); + exception = true; + return false; + } + } + return true; + } + + private boolean checkFilteredObjectCollection(Collection valueObject, Map fields) + throws NoSuchFieldException, IllegalAccessException, IOException { + for (Object ob : valueObject) { + if (checkFilteredObject(ob, ob.getClass(), fields)) { + return true; + } + } + return false; + } + + static Field getRequiredFieldFromAllFields(Class clazz, String fieldName) throws NoSuchFieldException { + List classFieldList = ValueSchema.getAllFields(clazz); + Field classField = null; + for (Field f : classFieldList) { + if (f.getName().equals(fieldName)) { + classField = f; + break; + } + } + if (classField == null) { + err().println("Error: Invalid field \"" + fieldName + "\" passed for filter"); + throw new NoSuchFieldException(); + } + classField.setAccessible(true); + return classField; + } + private boolean withinLimit(long i) { return limit == -1L || i < limit; } @@ -413,11 +643,6 @@ private String removeTrailingSlashIfNeeded(String dbPath) { return dbPath; } - @Override - public Class getParentType() { - return RDBParser.class; - } - /** * Utility for centralized JSON serialization using Jackson. 
*/ @@ -465,22 +690,52 @@ private static class Task implements Callable { private final long sequenceId; private final boolean withKey; private final boolean schemaV3; + private String valueFields; + @SuppressWarnings("checkstyle:parameternumber") Task(DBColumnFamilyDefinition dbColumnFamilyDefinition, ArrayList batch, LogWriter logWriter, - long sequenceId, boolean withKey, boolean schemaV3) { + long sequenceId, boolean withKey, boolean schemaV3, String valueFields) { this.dbColumnFamilyDefinition = dbColumnFamilyDefinition; this.batch = batch; this.logWriter = logWriter; this.sequenceId = sequenceId; this.withKey = withKey; this.schemaV3 = schemaV3; + this.valueFields = valueFields; + } + + Map getFieldSplit(List fields, Map fieldMap) { + int len = fields.size(); + if (fieldMap == null) { + fieldMap = new HashMap<>(); + } + if (len == 1) { + fieldMap.putIfAbsent(fields.get(0), null); + } else { + Map fieldMapGet = (Map) fieldMap.get(fields.get(0)); + if (fieldMapGet == null) { + fieldMap.put(fields.get(0), getFieldSplit(fields.subList(1, len), null)); + } else { + fieldMap.put(fields.get(0), getFieldSplit(fields.subList(1, len), fieldMapGet)); + } + } + return fieldMap; } @Override public Void call() { try { ArrayList results = new ArrayList<>(batch.size()); + Map fieldsSplitMap = new HashMap<>(); + + if (valueFields != null) { + for (String field : valueFields.split(",")) { + String[] subfields = field.split("\\."); + fieldsSplitMap = getFieldSplit(Arrays.asList(subfields), fieldsSplitMap); + } + } + for (ByteArrayKeyValue byteArrayKeyValue : batch) { StringBuilder sb = new StringBuilder(); if (!(sequenceId == FIRST_SEQUENCE_ID && results.isEmpty())) { @@ -515,16 +770,76 @@ public Void call() { Object o = dbColumnFamilyDefinition.getValueCodec() .fromPersistedFormat(byteArrayKeyValue.getValue()); - sb.append(WRITER.writeValueAsString(o)); + + if (valueFields != null) { + Map filteredValue = new HashMap<>(); + filteredValue.putAll(getFieldsFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsSplitMap)); + sb.append(WRITER.writeValueAsString(filteredValue)); + } else { + sb.append(WRITER.writeValueAsString(o)); + } + results.add(sb.toString()); } logWriter.log(results, sequenceId); - } catch (Exception e) { + } catch (IOException e) { exception = true; LOG.error("Exception parse Object", e); } return null; } + + Map getFieldsFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) { + Map valueMap = new HashMap<>(); + for (Map.Entry field : fieldsSplitMap.entrySet()) { + try { + Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey()); + Object valueObject = valueClassField.get(obj); + Map subfields = (Map) field.getValue(); + + if (subfields == null) { + valueMap.put(field.getKey(), valueObject); + } else { + if (Collection.class.isAssignableFrom(valueObject.getClass())) { + List subfieldObjectsList = + getFieldsFilteredObjectCollection((Collection) valueObject, subfields); + valueMap.put(field.getKey(), subfieldObjectsList); + } else if (Map.class.isAssignableFrom(valueObject.getClass())) { + Map subfieldObjectsMap = new HashMap<>(); + Map valueObjectMap = (Map) valueObject; + for (Map.Entry ob : valueObjectMap.entrySet()) { + Object subfieldValue; + if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { + subfieldValue = getFieldsFilteredObjectCollection((Collection)ob.getValue(), subfields); + } else { + subfieldValue = getFieldsFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); + } + subfieldObjectsMap.put(ob.getKey(), 
subfieldValue); + } + valueMap.put(field.getKey(), subfieldObjectsMap); + } else { + valueMap.put(field.getKey(), + getFieldsFilteredObject(valueObject, valueClassField.getType(), subfields)); + } + } + } catch (NoSuchFieldException ex) { + err().println("ERROR: no such field: " + field); + } catch (IllegalAccessException e) { + err().println("ERROR: Cannot get field from object: " + field); + } + } + return valueMap; + } + + List getFieldsFilteredObjectCollection(Collection valueObject, Map fields) + throws NoSuchFieldException, IllegalAccessException { + List subfieldObjectsList = new ArrayList<>(); + for (Object ob : valueObject) { + Object subfieldValue = getFieldsFilteredObject(ob, ob.getClass(), fields); + subfieldObjectsList.add(subfieldValue); + } + return subfieldObjectsList; + } } private static class ByteArrayKeyValue { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DropTable.java similarity index 90% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DropTable.java index 745712850b9..5326adc0e2b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DropTable.java @@ -16,10 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.ozone.debug.RocksDBUtils; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import picocli.CommandLine; @@ -37,7 +37,7 @@ name = "drop_column_family", description = "drop column family in db." ) -public class DropTable implements Callable, SubcommandWithParent { +public class DropTable implements Callable { @CommandLine.Option(names = {"--column-family", "--column_family"}, description = "Table name") @@ -73,9 +73,4 @@ public Void call() throws Exception { } return null; } - - @Override - public Class getParentType() { - return RDBParser.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ListTables.java similarity index 82% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ListTables.java index 494f42e5877..d115a44da8b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ListTables.java @@ -16,16 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; import java.nio.charset.StandardCharsets; import java.util.List; import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; - import org.apache.hadoop.hdds.utils.db.RocksDatabase; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; /** @@ -36,8 +33,7 @@ aliases = "ls", description = "list all column families in db." 
) -@MetaInfServices(SubcommandWithParent.class) -public class ListTables implements Callable, SubcommandWithParent { +public class ListTables implements Callable { @CommandLine.ParentCommand private RDBParser parent; @@ -51,9 +47,4 @@ public Void call() throws Exception { } return null; } - - @Override - public Class getParentType() { - return RDBParser.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java similarity index 81% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java index f133386ab13..4e945c7c418 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java @@ -16,12 +16,12 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -33,9 +33,15 @@ */ @CommandLine.Command( name = "ldb", + subcommands = { + DBScanner.class, + DropTable.class, + ListTables.class, + ValueSchema.class, + }, description = "Parse rocksdb file content") -@MetaInfServices(SubcommandWithParent.class) -public class RDBParser implements Callable, SubcommandWithParent { +@MetaInfServices(DebugSubcommand.class) +public class RDBParser implements Callable, DebugSubcommand { @Spec private CommandSpec spec; @@ -53,11 +59,6 @@ public void setDbPath(String dbPath) { this.dbPath = dbPath; } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java similarity index 86% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java index a5029b3e6b9..4b8eb3b3208 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java @@ -16,15 +16,14 @@ * limitations under the License. 
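
These hunks are part of a wider switch from the old SubcommandWithParent/getParentType lookup to explicit picocli `subcommands = {...}` lists plus marker interfaces (DebugSubcommand, RepairSubcommand) registered via @MetaInfServices. The sketch below is only an assumption about how an ExtensibleParentCommand-style parent could consume those registrations through ServiceLoader; the real wiring lives in hadoop-hdds-cli and is not shown in this patch, and the nested interface and class names here are placeholders:

```java
import java.util.ServiceLoader;
import picocli.CommandLine;

public final class DebugCliSketch {

  /** Hypothetical marker; the real DebugSubcommand interface ships in hadoop-hdds-cli. */
  public interface DebugSubcommand { }

  @CommandLine.Command(name = "debug", description = "Developer tools (sketch)")
  public static class OzoneDebugSketch { }

  public static CommandLine build() {
    CommandLine cmd = new CommandLine(new OzoneDebugSketch());
    // Classes annotated with @MetaInfServices(DebugSubcommand.class), such as RDBParser,
    // end up in META-INF/services and can be discovered here at startup.
    for (DebugSubcommand sub : ServiceLoader.load(DebugSubcommand.class)) {
      cmd.addSubcommand(sub);   // picocli 4.x takes the name from the @Command annotation
    }
    return cmd;
  }

  public static void main(String[] args) {
    build().usage(System.out);
  }
}
```

Within a group like `ldb`, the leaves are now declared directly in the annotation's subcommands list, while only the top-level groups register themselves for service-loader discovery.
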
*/ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.OzoneConsts; -import org.kohsuke.MetaInfServices; +import org.apache.hadoop.ozone.debug.DBDefinitionFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -52,8 +51,7 @@ name = "value-schema", description = "Schema of value in metadataTable" ) -@MetaInfServices(SubcommandWithParent.class) -public class ValueSchema implements Callable, SubcommandWithParent { +public class ValueSchema implements Callable { @CommandLine.ParentCommand private RDBParser parent; @@ -88,7 +86,7 @@ public Void call() throws Exception { String dbPath = parent.getDbPath(); Map fields = new HashMap<>(); - success = getValueFields(dbPath, fields); + success = getValueFields(dbPath, fields, depth, tableName, dnDBSchemaVersion); out().println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(fields)); @@ -101,7 +99,8 @@ public Void call() throws Exception { return null; } - private boolean getValueFields(String dbPath, Map valueSchema) { + public static boolean getValueFields(String dbPath, Map valueSchema, int d, String table, + String dnDBSchemaVersion) { dbPath = removeTrailingSlashIfNeeded(dbPath); DBDefinitionFactory.setDnDBSchemaVersion(dnDBSchemaVersion); @@ -111,19 +110,19 @@ private boolean getValueFields(String dbPath, Map valueSchema) { return false; } final DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(tableName); + dbDefinition.getColumnFamily(table); if (columnFamilyDefinition == null) { - err().print("Error: Table with name '" + tableName + "' not found"); + err().print("Error: Table with name '" + table + "' not found"); return false; } Class c = columnFamilyDefinition.getValueType(); - valueSchema.put(c.getSimpleName(), getFieldsStructure(c, depth)); + valueSchema.put(c.getSimpleName(), getFieldsStructure(c, d)); return true; } - private Object getFieldsStructure(Class clazz, int currentDepth) { + private static Object getFieldsStructure(Class clazz, int currentDepth) { if (clazz.isPrimitive() || String.class.equals(clazz)) { return clazz.getSimpleName(); } else if (currentDepth == 0) { @@ -148,7 +147,7 @@ private Object getFieldsStructure(Class clazz, int currentDepth) { } } - private List getAllFields(Class clazz) { + public static List getAllFields(Class clazz) { // NOTE: Schema of interface type, like ReplicationConfig, cannot be fetched. // An empty list "[]" will be shown for such types of fields. 
if (clazz == null) { @@ -171,12 +170,7 @@ private static PrintWriter out() { return spec.commandLine().getOut(); } - @Override - public Class getParentType() { - return RDBParser.class; - } - - private String removeTrailingSlashIfNeeded(String dbPath) { + private static String removeTrailingSlashIfNeeded(String dbPath) { if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { dbPath = dbPath.substring(0, dbPath.length() - 1); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/package-info.java new file mode 100644 index 00000000000..d69d92e6f1b --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * RDB debug related commands. + */ +package org.apache.hadoop.ozone.debug.ldb; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/BaseLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/BaseLogParser.java similarity index 97% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/BaseLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/BaseLogParser.java index a3ea9f6a1f6..4d2fb4b23c2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/BaseLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/BaseLogParser.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import com.google.common.annotations.VisibleForTesting; import org.apache.ratis.proto.RaftProtos; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/DatanodeRatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/DatanodeRatisLogParser.java similarity index 97% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/DatanodeRatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/DatanodeRatisLogParser.java index 9f35e8b3c31..3f0f70c281b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/DatanodeRatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/DatanodeRatisLogParser.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
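
The value-schema command walks the value class of a column family with plain reflection, recursing until it reaches a leaf type or the requested depth. A compact, self-contained sketch of that idea; the KeyInfo class and the "struct" placeholder for exhausted depth are stand-ins for the example, not Ozone types or the tool's exact output:

```java
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class SchemaSketch {

  /** Stand-in value type; real runs resolve e.g. OmKeyInfo from the DB definition. */
  static class KeyInfo {
    private long objectID;
    private String keyName;
  }

  static Object describe(Class<?> clazz, int depth) {
    if (clazz.isPrimitive() || String.class.equals(clazz)) {
      return clazz.getSimpleName();          // leaves are reported by type name
    }
    if (depth == 0) {
      return "struct";                       // depth budget exhausted: stop expanding
    }
    Map<String, Object> fields = new HashMap<>();
    for (Field f : allInstanceFields(clazz)) {
      fields.put(f.getName(), describe(f.getType(), depth - 1));
    }
    return fields;
  }

  /** Collects declared, non-static fields of the class and its superclasses. */
  static List<Field> allInstanceFields(Class<?> clazz) {
    List<Field> result = new ArrayList<>();
    for (Class<?> c = clazz; c != null && c != Object.class; c = c.getSuperclass()) {
      for (Field f : c.getDeclaredFields()) {
        if (!Modifier.isStatic(f.getModifiers())) {
          result.add(f);
        }
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // Prints something like {keyName=String, objectID=long}
    System.out.println(describe(KeyInfo.class, 1));
  }
}
```

Making getValueFields and getAllFields static lets the scanner reuse the same schema walk when validating a --fields request against the table's value type.
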
*/ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.ozone.container.common.transport.server diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/GenericRatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/GenericRatisLogParser.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/GenericRatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/GenericRatisLogParser.java index d989527c341..8bd0d0ff5a8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/GenericRatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/GenericRatisLogParser.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import picocli.CommandLine; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/OMRatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/OMRatisLogParser.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/OMRatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/OMRatisLogParser.java index 1ce8b63dab1..eb6214e6471 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/OMRatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/OMRatisLogParser.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/RatisLogParser.java similarity index 73% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/RatisLogParser.java index d41ee2dec16..1fad2b607c1 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/RatisLogParser.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.apache.hadoop.ozone.debug.OzoneDebug; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -39,15 +37,6 @@ }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -@MetaInfServices(SubcommandWithParent.class) -public class RatisLogParser extends GenericCli implements SubcommandWithParent { - - public static void main(String[] args) { - new RatisLogParser().run(args); - } - - @Override - public Class getParentType() { - return OzoneDebug.class; - } +@MetaInfServices(DebugSubcommand.class) +public class RatisLogParser implements DebugSubcommand { } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/SCMRatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/SCMRatisLogParser.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/SCMRatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/SCMRatisLogParser.java index db0cd8bd14e..dfdbdd2d847 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/SCMRatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/SCMRatisLogParser.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.ha.SCMRatisRequest; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/package-info.java similarity index 94% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/package-info.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/package-info.java index 727bb8aa4e0..d5c1027bfe4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/package-info.java @@ -15,8 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; /** * Command line utility for dump ratis log files. 
*/ +package org.apache.hadoop.ozone.debug.segmentparser; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 20acad0562a..3627b917f00 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -290,6 +290,10 @@ public void init() { //replace environment variables to support multi-node execution prefix = resolvePrefix(prefix); } + if (duration != null && !allowDuration()) { + LOG.warn("--duration is ignored"); + duration = null; + } if (duration != null) { durationInSecond = TimeDurationUtil.getTimeDurationHelper( "--runtime", duration, TimeUnit.SECONDS); @@ -554,6 +558,15 @@ public String getPrefix() { return prefix; } + /** + * Whether to enable Duration. + * If enabled, the command will load the --duration option. + * If not enabled, the command will not load the --duration option. + */ + public boolean allowDuration() { + return true; + } + public MetricRegistry getMetrics() { return metrics; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 0c525457aac..f229eb43bc6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -35,6 +35,8 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ContainerReplicator; @@ -83,11 +85,22 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements private ContainerReplicator replicator; private Timer timer; + private WitnessedContainerMetadataStore witnessedContainerMetadataStore; private List replicationTasks; @Override public Void call() throws Exception { + try { + return replicate(); + } finally { + if (witnessedContainerMetadataStore != null) { + witnessedContainerMetadataStore.close(); + } + } + } + + public Void replicate() throws Exception { OzoneConfiguration conf = createOzoneConfiguration(); @@ -102,7 +115,7 @@ public Void call() throws Exception { new ContainerOperationClient(conf); final List containerInfos = - containerOperationClient.listContainer(0L, 1_000_000); + containerOperationClient.listContainer(0L, 1_000_000).getContainerInfoList(); //logic same as the download+import on the destination datanode initializeReplicationSupervisor(conf, containerInfos.size() * 2); @@ -174,8 +187,10 @@ private void initializeReplicationSupervisor( if (fakeDatanodeUuid.isEmpty()) { fakeDatanodeUuid = UUID.randomUUID().toString(); } - - ContainerSet containerSet = new ContainerSet(1000); + WitnessedContainerMetadataStore referenceCountedDS = + WitnessedContainerMetadataStoreImpl.get(conf); + 
this.witnessedContainerMetadataStore = referenceCountedDS; + ContainerSet containerSet = new ContainerSet(referenceCountedDS.getContainerIdsTable(), 1000); ContainerMetrics metrics = ContainerMetrics.create(conf); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java index da16026210f..66bc7943676 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java @@ -23,9 +23,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; -import org.apache.hadoop.fs.impl.StoreImplementationUtils; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; /** @@ -109,20 +107,17 @@ private void doFlushOrSync(OutputStream outputStream) throws IOException { // noop break; case HFLUSH: - if (StoreImplementationUtils.hasCapability( - outputStream, StreamCapabilities.HSYNC)) { - ((Syncable)outputStream).hflush(); + if (outputStream instanceof Syncable) { + ((Syncable) outputStream).hflush(); } break; case HSYNC: - if (StoreImplementationUtils.hasCapability( - outputStream, StreamCapabilities.HSYNC)) { - ((Syncable)outputStream).hsync(); + if (outputStream instanceof Syncable) { + ((Syncable) outputStream).hsync(); } break; default: - throw new IllegalArgumentException("Unsupported sync option" - + flushOrSync); + throw new IllegalArgumentException("Unsupported sync option" + flushOrSync); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java index f83b2a1a4a9..a7527952ca3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; -import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; @@ -150,11 +150,14 @@ public Void call() throws Exception { } encodedContainerToken = scmClient.getEncodedContainerToken(containerID); XceiverClientFactory xceiverClientManager; + OzoneManagerProtocolClientSideTranslatorPB omClient; if (OzoneSecurityUtil.isSecurityEnabled(configuration)) { - CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, configuration); + omClient = createOmClient(configuration, null); + CACertificateProvider caCerts = () -> omClient.getServiceInfo().provideCACerts(); xceiverClientManager = new XceiverClientCreator(configuration, new ClientTrustManager(caCerts, null)); } else { + omClient = null; xceiverClientManager = new XceiverClientCreator(configuration); } clients = new ArrayList<>(numClients); @@ -169,6 +172,9 @@ public Void call() throws Exception { try { runTests(this::sendRPCReq); } finally { + if (omClient != null) { + 
omClient.close(); + } for (XceiverClientSpi client : clients) { xceiverClientManager.releaseClient(client, false); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java index 2b178ac0aec..a86b4789fef 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java @@ -463,13 +463,12 @@ private DatanodeDetails randomDatanodeDetails(ConfigurationSource config) details.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); details.setHostName(HddsUtils.getHostName(config)); details.setIpAddress(randomIp()); - details.setPort(DatanodeDetails.Port.Name.STANDALONE, 0); - details.setPort(DatanodeDetails.Port.Name.RATIS, 0); - details.setPort(DatanodeDetails.Port.Name.REST, 0); + details.setStandalonePort(0); + details.setRatisPort(0); + details.setRestPort(0); details.setVersion(HDDS_VERSION_INFO.getVersion()); details.setSetupTime(Time.now()); details.setRevision(HDDS_VERSION_INFO.getRevision()); - details.setBuildDate(HDDS_VERSION_INFO.getDate()); details.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); return details; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index d0c9a33b330..ccae53f345b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -75,7 +75,8 @@ DatanodeSimulator.class, OmMetadataGenerator.class, DNRPCLoadGenerator.class, - HsyncGenerator.class + HsyncGenerator.class, + OzoneClientCreator.class, }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) @@ -83,10 +84,6 @@ public class Freon extends GenericCli { public static final Logger LOG = LoggerFactory.getLogger(Freon.class); - public Freon() { - super(Freon.class); - } - @Option(names = "--server", description = "Enable internal http server to provide metric " + "and profile endpoint") diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java index 3eb879d5c06..5bc2c409318 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java @@ -31,7 +31,7 @@ import java.util.concurrent.atomic.AtomicLong; /** - * Directory & File Generator tool to test OM performance. + * Directory and File Generator tool to test OM performance. 
*/ @Command(name = "dtsg", aliases = "dfs-tree-generator", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java index 8de2ee032d0..687030ab325 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java @@ -16,17 +16,13 @@ */ package org.apache.hadoop.ozone.freon; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicInteger; - +import com.codahale.metrics.Timer; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import com.codahale.metrics.Timer; import org.apache.hadoop.ozone.util.PayloadUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,17 +30,25 @@ import picocli.CommandLine.Command; import picocli.CommandLine.Option; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicInteger; + /** * Data generator tool test hsync/write synchronization performance. + * This tool simulates the way HBase writes transaction logs (WAL) to a file in Ozone: + * - Transactions are written to the file's OutputStream by a single thread, each transaction is numbered by an + * increasing counter. Every transaction can be serialized to the OutputStream via multiple write calls. + * - Multiple threads checks and sync (hsync) the OutputStream to make it persistent. * * Example usage: * - * To generate 1000 hsync calls with 10 threads on a single file: - * ozone freon hsync-generator -t 10 --bytes-per-write=1024 -n 1000 - * - * To generate 1000 hsync calls with 10 threads on 3 files simultaneously: + * To simulate hlog that generates 1M hsync calls with 5 threads: * - * ozone freon hsync-generator -t 10 --bytes-per-write=1024 --number-of-files=3 -n 1000 + * ozone freon hsync-generator -t 5 --writes-per-transaction=32 --bytes-per-write=8 -n 1000000 * */ @Command(name = "hg", @@ -53,32 +57,38 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) -public class HsyncGenerator extends HadoopNestedDirGenerator implements Callable { +public class HsyncGenerator extends BaseFreonGenerator implements Callable { private static final Logger LOG = LoggerFactory.getLogger(HsyncGenerator.class); @CommandLine.ParentCommand private Freon freon; + @Option(names = {"--path"}, + description = "Hadoop FS file system path. 
Use full path.", + defaultValue = "o3fs://bucket1.vol1") + private String rootPath; + @Option(names = {"--bytes-per-write"}, description = "Size of each write", - defaultValue = "1024") + defaultValue = "8") private int writeSize; - @Option(names = {"--number-of-files"}, - description = "Number of files to run test.", - defaultValue = "1") - private int numberOfFiles; + @Option(names = {"--writes-per-transaction"}, + description = "Size of each write", + defaultValue = "32") + private int writesPerTransaction; private Timer timer; private OzoneConfiguration configuration; - private FSDataOutputStream[] outputStreams; - private Path[] files; - private AtomicInteger[] callsPerFile; + private FSDataOutputStream outputStream; + private byte[] data; + private final BlockingQueue writtenTransactions = new ArrayBlockingQueue<>(10_000); + private final AtomicInteger lastSyncedTransaction = new AtomicInteger(); public HsyncGenerator() { } - private byte[] data; + @VisibleForTesting HsyncGenerator(OzoneConfiguration ozoneConfiguration) { @@ -87,55 +97,75 @@ public HsyncGenerator() { @Override public Void call() throws Exception { - super.init(); + init(); if (configuration == null) { configuration = freon.createOzoneConfiguration(); } + URI uri = URI.create(rootPath); - outputStreams = new FSDataOutputStream[numberOfFiles]; - files = new Path[numberOfFiles]; - callsPerFile = new AtomicInteger[numberOfFiles]; - FileSystem fileSystem = getFileSystem(); - for (int i = 0; i < numberOfFiles; i++) { - Path file = new Path(getRootPath() + "/" + generateObjectName(i)); - fileSystem.mkdirs(file.getParent()); - outputStreams[i] = fileSystem.create(file); - files[i] = file; - callsPerFile[i] = new AtomicInteger(); - - LOG.info("Created file for testing: {}", file); - } + FileSystem fileSystem = FileSystem.get(uri, configuration); + Path file = new Path(rootPath + "/" + generateObjectName(0)); + fileSystem.mkdirs(file.getParent()); + outputStream = fileSystem.create(file); + + LOG.info("Created file for testing: {}", file); timer = getMetrics().timer("hsync-generator"); data = PayloadUtils.generatePayload(writeSize); + startTransactionWriter(); + try { runTests(this::sendHsync); } finally { - for (FSDataOutputStream outputStream : outputStreams) { - outputStream.close(); - } + outputStream.close(); + fileSystem.close(); } - StringBuilder distributionReport = new StringBuilder(); - for (int i = 0; i < numberOfFiles; i++) { - distributionReport.append("\t").append(files[i]).append(": ").append(callsPerFile[i]).append("\n"); - } + return null; + } - LOG.info("Hsync generator finished, calls distribution: \n {}", distributionReport); + private void startTransactionWriter() { + Thread transactionWriter = new Thread(this::generateTransactions); + transactionWriter.setDaemon(true); + transactionWriter.start(); + } - return null; + private void generateTransactions() { + int transaction = 0; + while (true) { + for (int i = 0; i < writesPerTransaction; i++) { + try { + if (writeSize > 1) { + outputStream.write(data); + } else { + outputStream.write(i); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + try { + writtenTransactions.put(transaction++); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } } private void sendHsync(long counter) throws Exception { timer.time(() -> { - int i = ((int) counter) % numberOfFiles; - FSDataOutputStream outputStream = outputStreams[i]; - outputStream.write(data); - outputStream.hsync(); - callsPerFile[i].incrementAndGet(); - return 
null; + while (true) { + int transaction = writtenTransactions.take(); + int lastSynced = lastSyncedTransaction.get(); + if (transaction > lastSynced) { + outputStream.hsync(); + lastSyncedTransaction.compareAndSet(lastSynced, transaction); + return null; + } + } }); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java index a3e21d58e2f..27ebc877633 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java @@ -57,6 +57,11 @@ public class OmBucketGenerator extends BaseFreonGenerator private Timer bucketCreationTimer; + @Override + public boolean allowDuration() { + return false; + } + @Override public Void call() throws Exception { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java index 9c98817185e..d5fbdc75f19 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java @@ -109,8 +109,7 @@ private void createKey(long counter) throws Exception { .setKeyName(generateObjectName(counter)) .setReplicationConfig(replicationConfig) .setLocationInfoList(new ArrayList<>()) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .setOwnerName(ownerName) .build(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java index 24060b0bac8..4c277f07422 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java @@ -272,8 +272,7 @@ private OmKeyArgs.Builder createKeyArgsBuilder() { .setVolumeName(volumeName) .setReplicationConfig(replicationConfig) .setLocationInfoList(new ArrayList<>()) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)); + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)); } private String getPath(long counter) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java new file mode 100644 index 00000000000..2fc4cb48eac --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
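
The rewritten HsyncGenerator separates writing from syncing: a single thread appends numbered transactions and publishes their ids on a queue, while the worker threads take ids and hsync only when the id is newer than the last synced one, so one sync can cover many pending transactions. Below is a minimal, self-contained sketch of that pattern, with FileOutputStream.getFD().sync() standing in for FSDataOutputStream#hsync and the file name, payload size, and thread count chosen arbitrarily:

```java
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;

public final class WalSyncSketch {

  private static final BlockingQueue<Integer> WRITTEN = new ArrayBlockingQueue<>(10_000);
  private static final AtomicInteger LAST_SYNCED = new AtomicInteger(-1);

  public static void main(String[] args) throws Exception {
    try (FileOutputStream out = new FileOutputStream("wal-sketch.bin")) {
      Thread writer = new Thread(() -> {
        try {
          for (int txn = 0; txn < 1_000; txn++) {
            out.write(new byte[8]);   // one transaction's payload (size is arbitrary here)
            WRITTEN.put(txn);         // publish the transaction number once fully written
          }
        } catch (IOException | InterruptedException e) {
          throw new RuntimeException(e);
        }
      });

      Runnable syncer = () -> {
        try {
          while (true) {
            int txn = WRITTEN.take();            // a transaction that still needs durability
            int synced = LAST_SYNCED.get();
            if (txn > synced) {
              out.getFD().sync();                // stands in for FSDataOutputStream#hsync
              LAST_SYNCED.compareAndSet(synced, txn);
            }
          }
        } catch (IOException | InterruptedException e) {
          // stream closed or thread interrupted: stop syncing
        }
      };

      writer.start();
      for (int i = 0; i < 2; i++) {              // two syncer threads, like -t 2
        Thread t = new Thread(syncer);
        t.setDaemon(true);
        t.start();
      }
      writer.join();
    }
  }
}
```

Because a syncer skips ids that an earlier hsync already covered, adding threads raises sync pressure without forcing one hsync per transaction, which matches the WAL-style behaviour the benchmark's javadoc describes.
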
See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.freon; + +import com.codahale.metrics.Timer; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Creates and closes Ozone clients. + */ +@CommandLine.Command(name = "occ", + aliases = "ozone-client-creator", + description = "Create and close Ozone clients without doing anything useful", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true, + showDefaultValues = true) +public class OzoneClientCreator extends BaseFreonGenerator implements Callable { + + @CommandLine.Option(names = "--om-service-id", + description = "OM Service ID" + ) + private String omServiceID; + + private Timer timer; + private OzoneConfiguration conf; + + @Override + public Void call() { + init(); + conf = createOzoneConfiguration(); + timer = getMetrics().timer("client-create"); + runTests(this::createClient); + return null; + } + + private void createClient(long step) { + timer.time(this::createClientSafely); + } + + private void createClientSafely() { + try { + createOzoneClient(omServiceID, conf).close(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java index c964676f266..58b62d22b98 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java @@ -355,11 +355,7 @@ public Void call() throws Exception { // wait until all keys are added or exception occurred. while ((numberOfKeysAdded.get() != totalKeyCount) && exception == null) { - try { - Thread.sleep(CHECK_INTERVAL_MILLIS); - } catch (InterruptedException e) { - throw e; - } + Thread.sleep(CHECK_INTERVAL_MILLIS); } executor.shutdown(); executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); @@ -373,11 +369,7 @@ public Void call() throws Exception { if (validateExecutor != null) { while (!validationQueue.isEmpty()) { - try { - Thread.sleep(CHECK_INTERVAL_MILLIS); - } catch (InterruptedException e) { - throw e; - } + Thread.sleep(CHECK_INTERVAL_MILLIS); } validateExecutor.shutdown(); validateExecutor.awaitTermination(Integer.MAX_VALUE, @@ -421,11 +413,7 @@ private void doCleanObjects() throws InterruptedException { // wait until all Buckets are cleaned or exception occurred. while ((numberOfBucketsCleaned.get() != totalBucketCount) && exception == null) { - try { - Thread.sleep(CHECK_INTERVAL_MILLIS); - } catch (InterruptedException e) { - throw e; - } + Thread.sleep(CHECK_INTERVAL_MILLIS); } } catch (InterruptedException e) { LOG.error("Failed to wait until all Buckets are cleaned", e); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java index ef7a85a4121..0233c14470a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java @@ -28,9 +28,9 @@ * Generate buckets via the s3 interface. 
* * For a secure cluster, - * $> init user keytab - * $> kinit -kt /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM - * $> eval $(ozone s3 getsecret -e) + * $> init user keytab + * $> kinit -kt /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM + * $> eval $(ozone s3 getsecret -e) * for getting and exporting access_key_id and secret_access_key * to freon shell test environment * secret access key. diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java index d62ad1d79c1..6dc0efae0d2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java @@ -867,12 +867,9 @@ private static DatanodeDetails createRandomDatanodeDetails() { RANDOM.nextInt(256) + "." + RANDOM.nextInt(256) + "." + RANDOM .nextInt(256) + "." + RANDOM.nextInt(256); - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(uuid).setHostName("localhost") .setIpAddress(ipAddress) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java index 66656d315d8..a15caab7d6b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java @@ -59,7 +59,7 @@ public Void call() throws Exception { ConfigurationSource config = createOzoneConfiguration(); - scmDb = DBStoreBuilder.createDBStore(config, new SCMDBDefinition()); + scmDb = DBStoreBuilder.createDBStore(config, SCMDBDefinition.get()); containerStore = CONTAINERS.getTable(scmDb); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java index a7a74c2e372..b0ac5b0033e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java @@ -67,8 +67,8 @@ public static void main(String[] args) throws IOException { /** * Generates Container Id to Blocks and BlockDetails mapping. 
* @param configuration @{@link OzoneConfiguration} - * @return Map>> - * Map of ContainerId -> (Block, Block info) + * @return {@code Map>> + * Map of ContainerId -> (Block, Block info)} * @throws IOException */ public Map>> diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java index 16644522808..b1ed206f975 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java @@ -18,10 +18,10 @@ package org.apache.hadoop.ozone.repair; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.cli.ExtensibleParentCommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.cli.RepairSubcommand; import picocli.CommandLine; import java.nio.charset.StandardCharsets; @@ -34,39 +34,13 @@ description = "Operational tool to repair Ozone", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneRepair extends GenericCli { +public class OzoneRepair extends GenericCli implements ExtensibleParentCommand { public static final String WARNING_SYS_USER_MESSAGE = "ATTENTION: Running as user %s. Make sure this is the same user used to run the Ozone process." + " Are you sure you want to continue (y/N)? "; - - private OzoneConfiguration ozoneConf; - - public OzoneRepair() { - super(OzoneRepair.class); - } - - @VisibleForTesting - public OzoneRepair(OzoneConfiguration configuration) { - super(OzoneRepair.class); - this.ozoneConf = configuration; - } - - public OzoneConfiguration getOzoneConf() { - if (ozoneConf == null) { - ozoneConf = createOzoneConfiguration(); - } - return ozoneConf; - } - - /** - * Main for the Ozone Repair shell Command handling. - * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { + public static void main(String[] argv) { new OzoneRepair().run(argv); } @@ -91,4 +65,8 @@ public String getConsoleReadLineWithFormat(String currentUser) { return (new Scanner(System.in, StandardCharsets.UTF_8.name())).nextLine().trim(); } + @Override + public Class subcommandType() { + return RepairSubcommand.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java new file mode 100644 index 00000000000..e6462aa3f85 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java @@ -0,0 +1,256 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.repair; + +import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; +import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient; +import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.debug.DBDefinitionFactory; +import org.apache.hadoop.ozone.debug.RocksDBUtils; +import java.security.cert.CertificateFactory; +import org.kohsuke.MetaInfServices; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDBException; +import picocli.CommandLine; + +import java.io.IOException; +import java.io.PrintWriter; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.charset.StandardCharsets; +import java.nio.file.Paths; +import java.security.cert.CertPath; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Map; +import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; +import java.util.Optional; +import java.util.Arrays; +import java.util.concurrent.Callable; + +import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.VALID_SCM_CERTS; +import static org.apache.hadoop.hdds.security.x509.certificate.client.DefaultCertificateClient.CERT_FILE_NAME_FORMAT; +import static org.apache.hadoop.ozone.om.helpers.OzoneFSUtils.removeTrailingSlashIfNeeded; + +/** + * In case of accidental deletion of SCM certificates from local storage, + * this tool restores the certs that are persisted into the SCM DB. + * Note that this will only work if the SCM has persisted certs in its RocksDB + * and private keys of the SCM are intact. 
+ */ +@CommandLine.Command( + name = "cert-recover", + description = "Recover Deleted SCM Certificate from RocksDB") +@MetaInfServices(RepairSubcommand.class) +public class RecoverSCMCertificate implements Callable, RepairSubcommand { + + @CommandLine.Option(names = {"--db"}, + required = true, + description = "SCM DB Path") + private String dbPath; + + @CommandLine.ParentCommand + private OzoneRepair parent; + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + private PrintWriter err() { + return spec.commandLine().getErr(); + } + + private PrintWriter out() { + return spec.commandLine().getOut(); + } + + @Override + public Void call() throws Exception { + dbPath = removeTrailingSlashIfNeeded(dbPath); + String tableName = VALID_SCM_CERTS.getName(); + DBDefinition dbDefinition = + DBDefinitionFactory.getDefinition(Paths.get(dbPath), new OzoneConfiguration()); + if (dbDefinition == null) { + throw new Exception("Error: Incorrect DB Path"); + } + DBColumnFamilyDefinition columnFamilyDefinition = + getDbColumnFamilyDefinition(tableName, dbDefinition); + + try { + List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath); + final List cfHandleList = new ArrayList<>(); + byte[] tableNameBytes = tableName.getBytes(StandardCharsets.UTF_8); + ColumnFamilyHandle cfHandle = null; + try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, cfDescList, + cfHandleList)) { + cfHandle = getColumnFamilyHandle(cfHandleList, tableNameBytes); + SecurityConfig securityConfig = new SecurityConfig(parent.getOzoneConf()); + + Map allCerts = getAllCerts(columnFamilyDefinition, cfHandle, db); + out().println("All Certs in DB : " + allCerts.keySet()); + String hostName = InetAddress.getLocalHost().getHostName(); + out().println("Host: " + hostName); + + X509Certificate subCertificate = getSubCertificate(allCerts, hostName); + X509Certificate rootCertificate = getRootCertificate(allCerts); + + out().println("Sub cert serialID for this host: " + subCertificate.getSerialNumber().toString()); + out().println("Root cert serialID: " + rootCertificate.getSerialNumber().toString()); + + boolean isRootCA = false; + + String caPrincipal = rootCertificate.getSubjectDN().getName(); + if (caPrincipal.contains(hostName)) { + isRootCA = true; + } + storeCerts(subCertificate, rootCertificate, isRootCA, securityConfig); + } + } catch (RocksDBException | CertificateException exception) { + err().print("Failed to recover scm cert"); + } + return null; + } + + private static ColumnFamilyHandle getColumnFamilyHandle( + List cfHandleList, byte[] tableNameBytes) throws Exception { + ColumnFamilyHandle cfHandle = null; + for (ColumnFamilyHandle cf : cfHandleList) { + if (Arrays.equals(cf.getName(), tableNameBytes)) { + cfHandle = cf; + break; + } + } + if (cfHandle == null) { + throw new Exception("Error: VALID_SCM_CERTS table not found in DB"); + } + return cfHandle; + } + + private static X509Certificate getRootCertificate( + Map allCerts) throws Exception { + Optional cert = allCerts.values().stream().filter( + c -> c.getSubjectDN().getName() + .contains(OzoneConsts.SCM_ROOT_CA_PREFIX)).findFirst(); + if (!cert.isPresent()) { + throw new Exception("Root CA Cert not found in the DB for this host, Certs in the DB : " + allCerts.keySet()); + } + return cert.get(); + } + + + private static X509Certificate getSubCertificate( + Map allCerts, String hostName) throws Exception { + Optional cert = allCerts.values().stream().filter( + c -> c.getSubjectDN().getName() + .contains(OzoneConsts.SCM_SUB_CA_PREFIX) && 
c.getSubjectDN() + .getName().contains(hostName)).findFirst(); + if (!cert.isPresent()) { + throw new Exception("Sub CA Cert not found in the DB for this host, Certs in the DB : " + allCerts.keySet()); + } + return cert.get(); + } + + private static Map getAllCerts( + DBColumnFamilyDefinition columnFamilyDefinition, + ColumnFamilyHandle cfHandle, ManagedRocksDB db) throws IOException, RocksDBException { + Map allCerts = new HashMap<>(); + ManagedRocksIterator rocksIterator = ManagedRocksIterator.managed(db.get().newIterator(cfHandle)); + rocksIterator.get().seekToFirst(); + while (rocksIterator.get().isValid()) { + BigInteger id = (BigInteger) columnFamilyDefinition.getKeyCodec() + .fromPersistedFormat(rocksIterator.get().key()); + X509Certificate certificate = + (X509Certificate) columnFamilyDefinition.getValueCodec() + .fromPersistedFormat(rocksIterator.get().value()); + allCerts.put(id, certificate); + rocksIterator.get().next(); + } + return allCerts; + } + + private static DBColumnFamilyDefinition getDbColumnFamilyDefinition( + String tableName, DBDefinition dbDefinition) throws Exception { + DBColumnFamilyDefinition columnFamilyDefinition = + dbDefinition.getColumnFamily(tableName); + if (columnFamilyDefinition == null) { + throw new Exception( + "Error: VALID_SCM_CERTS table no found in Definition"); + } + return columnFamilyDefinition; + } + + private void storeCerts(X509Certificate scmCertificate, + X509Certificate rootCertificate, boolean isRootCA, SecurityConfig securityConfig) + throws CertificateException, IOException { + CertificateCodec certCodec = + new CertificateCodec(securityConfig, SCMCertificateClient.COMPONENT_NAME); + + out().println("Writing certs to path : " + certCodec.getLocation().toString()); + + CertPath certPath = addRootCertInPath(scmCertificate, rootCertificate); + CertPath rootCertPath = getRootCertPath(rootCertificate); + String encodedCert = CertificateCodec.getPEMEncodedString(certPath); + String certName = String.format(CERT_FILE_NAME_FORMAT, + CAType.NONE.getFileNamePrefix() + scmCertificate.getSerialNumber().toString()); + certCodec.writeCertificate(certName, encodedCert); + + String rootCertName = String.format(CERT_FILE_NAME_FORMAT, + CAType.SUBORDINATE.getFileNamePrefix() + rootCertificate.getSerialNumber().toString()); + String encodedRootCert = CertificateCodec.getPEMEncodedString(rootCertPath); + certCodec.writeCertificate(rootCertName, encodedRootCert); + + certCodec.writeCertificate(certCodec.getLocation().toAbsolutePath(), + securityConfig.getCertificateFileName(), encodedCert); + + if (isRootCA) { + CertificateCodec rootCertCodec = + new CertificateCodec(securityConfig, OzoneConsts.SCM_ROOT_CA_COMPONENT_NAME); + out().println("Writing root certs to path : " + rootCertCodec.getLocation().toString()); + rootCertCodec.writeCertificate(rootCertCodec.getLocation().toAbsolutePath(), + securityConfig.getCertificateFileName(), encodedRootCert); + } + } + + public CertPath addRootCertInPath(X509Certificate scmCert, + X509Certificate rootCert) throws CertificateException { + ArrayList updatedList = new ArrayList<>(); + updatedList.add(scmCert); + updatedList.add(rootCert); + CertificateFactory certFactory = + CertificateCodec.getCertFactory(); + return certFactory.generateCertPath(updatedList); + } + + public CertPath getRootCertPath(X509Certificate rootCert) + throws CertificateException { + ArrayList updatedList = new ArrayList<>(); + updatedList.add(rootCert); + CertificateFactory factory = CertificateCodec.getCertFactory(); + return 
factory.generateCertPath(updatedList); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java similarity index 82% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java index 0f36934ec14..01ad705b201 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java @@ -16,10 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.repair; +package org.apache.hadoop.ozone.repair.ldb; import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.RepairSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -29,9 +29,13 @@ * Ozone Repair CLI for RocksDB. */ @CommandLine.Command(name = "ldb", + subcommands = { + SnapshotRepair.class, + TransactionInfoRepair.class, + }, description = "Operational tool to repair RocksDB table.") -@MetaInfServices(SubcommandWithParent.class) -public class RDBRepair implements Callable, SubcommandWithParent { +@MetaInfServices(RepairSubcommand.class) +public class RDBRepair implements Callable, RepairSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; @@ -50,9 +54,4 @@ public Void call() { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneRepair.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java similarity index 94% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java index d07fc13be8a..45c10f5668b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java @@ -16,18 +16,15 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.repair.om; +package org.apache.hadoop.ozone.repair.ldb; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.ozone.debug.RocksDBUtils; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.repair.RDBRepair; import org.apache.hadoop.ozone.shell.bucket.BucketUri; -import org.kohsuke.MetaInfServices; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -55,8 +52,7 @@ name = "snapshot", description = "CLI to update global and path previous snapshot for a snapshot in case snapshot chain is corrupted." 
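
For the cert-recover command introduced above, the interesting work after the RocksDB scan is selecting the right certificates for the host and writing them back out as PEM. The snippet below illustrates that selection and encoding with plain JCA only; the "scm-sub" marker is a placeholder for the real OzoneConsts prefix, and the actual tool goes through CertificateCodec so that file names and locations match what the SCM expects on restart:

```java
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateEncodingException;
import java.security.cert.X509Certificate;
import java.util.Base64;
import java.util.Collection;
import java.util.List;

public final class CertRecoverySketch {

  /** Pick the sub-CA certificate issued to this host; "scm-sub" is a stand-in marker. */
  static X509Certificate pickSubCaCert(Collection<X509Certificate> certs, String hostName) {
    return certs.stream()
        .filter(c -> c.getSubjectX500Principal().getName().contains("scm-sub"))
        .filter(c -> c.getSubjectX500Principal().getName().contains(hostName))
        .findFirst()
        .orElseThrow(() -> new IllegalStateException("No sub-CA certificate for " + hostName));
  }

  /** PEM-encode a certificate chain (leaf first, root last), one block per certificate. */
  static String toPem(List<X509Certificate> chain) throws CertificateEncodingException {
    Base64.Encoder b64 = Base64.getMimeEncoder(64, "\n".getBytes(StandardCharsets.US_ASCII));
    StringBuilder sb = new StringBuilder();
    for (X509Certificate cert : chain) {
      sb.append("-----BEGIN CERTIFICATE-----\n")
        .append(b64.encodeToString(cert.getEncoded()))
        .append("\n-----END CERTIFICATE-----\n");
    }
    return sb.toString();
  }
}
```
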
) -@MetaInfServices(SubcommandWithParent.class) -public class SnapshotRepair implements Callable, SubcommandWithParent { +public class SnapshotRepair implements Callable { protected static final Logger LOG = LoggerFactory.getLogger(SnapshotRepair.class); @@ -178,9 +174,4 @@ private Set getSnapshotIdSet(ManagedRocksDB db, ColumnFamilyHandle snapsho } return snapshotIdSet; } - - @Override - public Class getParentType() { - return RDBRepair.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java similarity index 93% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java index f2a63317378..277a2788247 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java @@ -19,16 +19,14 @@ * permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.repair; +package org.apache.hadoop.ozone.repair.ldb; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.debug.RocksDBUtils; -import org.kohsuke.MetaInfServices; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -42,7 +40,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE; - /** * Tool to update the highest term-index in transactionInfoTable. */ @@ -52,9 +49,7 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) -@MetaInfServices(SubcommandWithParent.class) -public class TransactionInfoRepair - implements Callable, SubcommandWithParent { +public class TransactionInfoRepair implements Callable { @CommandLine.Spec private static CommandLine.Model.CommandSpec spec; @@ -127,9 +122,4 @@ protected RDBRepair getParent() { return parent; } - @Override - public Class getParentType() { - return RDBRepair.class; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java new file mode 100644 index 00000000000..388d4b7dcea --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * RDB related repair tools. + */ +package org.apache.hadoop.ozone.repair.ldb; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java new file mode 100644 index 00000000000..5a217e9f2de --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.om; + +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * CLI entry point for the FSO tree repair tool. + */ +@CommandLine.Command( + name = "fso-tree", + description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " + + "OM should be stopped while this tool is run." +) +public class FSORepairCLI implements Callable<Void> { + + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Path to OM RocksDB") + private String dbPath; + + @CommandLine.Option(names = {"-r", "--repair"}, + defaultValue = "false", + description = "Run in repair mode to move unreferenced files and directories to deleted tables.") + private boolean repair; + + @CommandLine.Option(names = {"-v", "--volume"}, + description = "Filter by volume name. Add '/' before the volume name.") + private String volume; + + @CommandLine.Option(names = {"-b", "--bucket"}, + description = "Filter by bucket name") + private String bucket; + + @CommandLine.Option(names = {"--verbose"}, + description = "Verbose output. 
Show all intermediate steps and deleted keys info.") + private boolean verbose; + + @Override + public Void call() throws Exception { + if (repair) { + System.out.println("FSO Repair Tool is running in repair mode"); + } else { + System.out.println("FSO Repair Tool is running in debug mode"); + } + try { + FSORepairTool + repairTool = new FSORepairTool(dbPath, repair, volume, bucket, verbose); + repairTool.run(); + } catch (Exception ex) { + throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage()); + } + + if (verbose) { + System.out.println("FSO repair finished."); + } + + return null; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java new file mode 100644 index 00000000000..7e0fb23f5aa --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java @@ -0,0 +1,710 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.repair.om; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.helpers.WithObjectID; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.ratis.util.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Objects; +import java.util.Stack; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; + +/** + * Base Tool to identify and repair disconnected FSO trees across all buckets. + * This tool logs information about reachable, unreachable and unreferenced files and directories in debug mode + * and moves these unreferenced files and directories to the deleted tables in repair mode. 
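+ * Reachable entries are connected to their bucket through parent links; unreachable entries are disconnected but already tracked in a deleted table; unreferenced entries are disconnected and not tracked anywhere.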
+ + * If deletes are still in progress (the deleted directory table is not empty), the tool + * reports that the tree is unreachable, even though pending deletes would fix the issue. + * If not, the tool reports them as unreferenced and deletes them in repair mode. + + * Before using the tool, make sure all OMs are stopped, and that all Ratis logs have been flushed to the OM DB. + * This can be done using `ozone admin prepare` before running the tool, and `ozone admin + * cancelprepare` when done. + + * The tool will run a DFS from each bucket, and save all reachable directories as keys in a new temporary RocksDB + * instance called "reachable.db" in the same directory as om.db. + * It will then scan the entire file and directory tables for each bucket to see if each object's parent is in the + * reachable table of reachable.db. The reachable table will be dropped and recreated for each bucket. + * The tool is idempotent. reachable.db will not be deleted automatically when the tool finishes, + * in case users want to manually inspect it. It can be safely deleted once the tool finishes. + */ +public class FSORepairTool { + public static final Logger LOG = LoggerFactory.getLogger(FSORepairTool.class); + + private final String omDBPath; + private final DBStore store; + private final Table volumeTable; + private final Table bucketTable; + private final Table directoryTable; + private final Table fileTable; + private final Table deletedDirectoryTable; + private final Table deletedTable; + private final Table snapshotInfoTable; + private final String volumeFilter; + private final String bucketFilter; + private static final String REACHABLE_TABLE = "reachable"; + private DBStore reachableDB; + private final ReportStatistics reachableStats; + private final ReportStatistics unreachableStats; + private final ReportStatistics unreferencedStats; + private final boolean repair; + private final boolean verbose; + + public FSORepairTool(String dbPath, boolean repair, String volume, String bucket, boolean verbose) + throws IOException { + this(getStoreFromPath(dbPath), dbPath, repair, volume, bucket, verbose); + } + + /** + * Allows passing RocksDB instance from a MiniOzoneCluster directly to this class for testing. 
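+ * Note: run() closes the supplied DBStore when it finishes.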
+ */ + public FSORepairTool(DBStore dbStore, String dbPath, boolean repair, String volume, String bucket, boolean verbose) + throws IOException { + this.reachableStats = new ReportStatistics(0, 0, 0); + this.unreachableStats = new ReportStatistics(0, 0, 0); + this.unreferencedStats = new ReportStatistics(0, 0, 0); + + this.store = dbStore; + this.omDBPath = dbPath; + this.repair = repair; + this.volumeFilter = volume; + this.bucketFilter = bucket; + this.verbose = verbose; + volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE, + String.class, + OmVolumeArgs.class); + bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE, + String.class, + OmBucketInfo.class); + directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE, + String.class, + OmDirectoryInfo.class); + fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE, + String.class, + OmKeyInfo.class); + deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE, + String.class, + OmKeyInfo.class); + deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE, + String.class, + RepeatedOmKeyInfo.class); + snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, + String.class, + SnapshotInfo.class); + } + + protected static DBStore getStoreFromPath(String dbPath) throws IOException { + File omDBFile = new File(dbPath); + if (!omDBFile.exists() || !omDBFile.isDirectory()) { + throw new IOException(String.format("Specified OM DB instance %s does " + + "not exist or is not a RocksDB directory.", dbPath)); + } + // Load RocksDB and tables needed. + return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), new File(dbPath).getParentFile(), -1); + } + + public FSORepairTool.Report run() throws Exception { + try { + if (bucketFilter != null && volumeFilter == null) { + System.out.println("--bucket flag cannot be used without specifying --volume."); + return null; + } + + if (volumeFilter != null) { + OmVolumeArgs volumeArgs = volumeTable.getIfExist(volumeFilter); + if (volumeArgs == null) { + System.out.println("Volume '" + volumeFilter + "' does not exist."); + return null; + } + } + + // Iterate all volumes or a specific volume if specified + try (TableIterator> + volumeIterator = volumeTable.iterator()) { + try { + openReachableDB(); + } catch (IOException e) { + System.out.println("Failed to open reachable database: " + e.getMessage()); + throw e; + } + while (volumeIterator.hasNext()) { + Table.KeyValue volumeEntry = volumeIterator.next(); + String volumeKey = volumeEntry.getKey(); + + if (volumeFilter != null && !volumeFilter.equals(volumeKey)) { + continue; + } + + System.out.println("Processing volume: " + volumeKey); + + if (bucketFilter != null) { + OmBucketInfo bucketInfo = bucketTable.getIfExist(volumeKey + "/" + bucketFilter); + if (bucketInfo == null) { + //Bucket does not exist in the volume + System.out.println("Bucket '" + bucketFilter + "' does not exist in volume '" + volumeKey + "'."); + return null; + } + + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + System.out.println("Skipping non-FSO bucket " + bucketFilter); + continue; + } + + processBucket(volumeEntry.getValue(), bucketInfo); + } else { + + // Iterate all buckets in the volume. 
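+ // Bucket keys are prefixed by their volume key, so seek to the volume key and stop once a key no longer starts with it.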
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmBucketInfo>> + bucketIterator = bucketTable.iterator()) { + bucketIterator.seek(volumeKey); + while (bucketIterator.hasNext()) { + Table.KeyValue<String, OmBucketInfo> bucketEntry = bucketIterator.next(); + String bucketKey = bucketEntry.getKey(); + OmBucketInfo bucketInfo = bucketEntry.getValue(); + + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + System.out.println("Skipping non-FSO bucket " + bucketKey); + continue; + } + + // Stop this loop once we have seen all buckets in the current + // volume. + if (!bucketKey.startsWith(volumeKey)) { + break; + } + + processBucket(volumeEntry.getValue(), bucketInfo); + } + } + } + } + } + } catch (IOException e) { + System.out.println("An error occurred while processing: " + e.getMessage()); + throw e; + } finally { + closeReachableDB(); + store.close(); + } + + return buildReportAndLog(); + } + + private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketName) throws IOException { + if (snapshotInfoTable == null) { + return false; + } + + try (TableIterator<String, ? extends Table.KeyValue<String, SnapshotInfo>> iterator = + snapshotInfoTable.iterator()) { + while (iterator.hasNext()) { + SnapshotInfo snapshotInfo = iterator.next().getValue(); + String snapshotPath = (volumeName + "/" + bucketName).replaceFirst("^/", ""); + if (snapshotInfo.getSnapshotPath().equals(snapshotPath)) { + return true; + } + } + } + return false; + } + + private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException { + System.out.println("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName()); + if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) { + if (!repair) { + System.out.println( + "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. "); + } else { + System.out.println( + "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " + + "due to snapshot presence."); + return; + } + } + markReachableObjectsInBucket(volume, bucketInfo); + handleUnreachableAndUnreferencedObjects(volume, bucketInfo); + } + + private Report buildReportAndLog() { + Report report = new Report.Builder() + .setReachable(reachableStats) + .setUnreachable(unreachableStats) + .setUnreferenced(unreferencedStats) + .build(); + + System.out.println("\n" + report); + return report; + } + + private void markReachableObjectsInBucket(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Only put directories in the stack. + // Directory keys should have the form /volumeID/bucketID/parentID/name. + Stack<String> dirKeyStack = new Stack<>(); + + // Since the tool uses parent directories to check for reachability, add + // a reachable entry for the bucket as well. + addReachableEntry(volume, bucket, bucket); + // Initialize the stack with all immediate child directories of the + // bucket, and mark them all as reachable. + Collection<String> childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket); + dirKeyStack.addAll(childDirs); + + while (!dirKeyStack.isEmpty()) { + // Get one directory and process its immediate children. + String currentDirKey = dirKeyStack.pop(); + OmDirectoryInfo currentDir = directoryTable.get(currentDirKey); + if (currentDir == null) { + System.out.println("Directory key " + currentDirKey + " to be processed was not found in the directory table."); + continue; + } + + // TODO revisit this for a more memory efficient implementation, + // possibly making better use of RocksDB iterators. 
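+ // Each iteration pops one directory, marks its immediate children as reachable, and pushes them: an iterative depth-first traversal of the directory table.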
+ childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, currentDir); + dirKeyStack.addAll(childDirs); + } + } + + private boolean isDirectoryInDeletedDirTable(String dirKey) throws IOException { + return deletedDirectoryTable.isExist(dirKey); + } + + private boolean isFileKeyInDeletedTable(String fileKey) throws IOException { + return deletedTable.isExist(fileKey); + } + + private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Check for unreachable and unreferenced directories in the bucket. + String bucketPrefix = OM_KEY_PREFIX + + volume.getObjectID() + + OM_KEY_PREFIX + + bucket.getObjectID(); + + try (TableIterator> dirIterator = + directoryTable.iterator()) { + dirIterator.seek(bucketPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue dirEntry = dirIterator.next(); + String dirKey = dirEntry.getKey(); + + // Only search directories in this bucket. + if (!dirKey.startsWith(bucketPrefix)) { + break; + } + + if (!isReachable(dirKey)) { + if (!isDirectoryInDeletedDirTable(dirKey)) { + System.out.println("Found unreferenced directory: " + dirKey); + unreferencedStats.addDir(); + + if (!repair) { + if (verbose) { + System.out.println("Marking unreferenced directory " + dirKey + " for deletion."); + } + } else { + System.out.println("Deleting unreferenced directory " + dirKey); + OmDirectoryInfo dirInfo = dirEntry.getValue(); + markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo); + } + } else { + unreachableStats.addDir(); + } + } + } + } + + // Check for unreachable and unreferenced files + try (TableIterator> + fileIterator = fileTable.iterator()) { + fileIterator.seek(bucketPrefix); + while (fileIterator.hasNext()) { + Table.KeyValue fileEntry = fileIterator.next(); + String fileKey = fileEntry.getKey(); + // Only search files in this bucket. + if (!fileKey.startsWith(bucketPrefix)) { + break; + } + + OmKeyInfo fileInfo = fileEntry.getValue(); + if (!isReachable(fileKey)) { + if (!isFileKeyInDeletedTable(fileKey)) { + System.out.println("Found unreferenced file: " + fileKey); + unreferencedStats.addFile(fileInfo.getDataSize()); + + if (!repair) { + if (verbose) { + System.out.println("Marking unreferenced file " + fileKey + " for deletion." + fileKey); + } + } else { + System.out.println("Deleting unreferenced file " + fileKey); + markFileForDeletion(fileKey, fileInfo); + } + } else { + unreachableStats.addFile(fileInfo.getDataSize()); + } + } else { + // NOTE: We are deserializing the proto of every reachable file + // just to log it's size. If we don't need this information we could + // save time by skipping this step. + reachableStats.addFile(fileInfo.getDataSize()); + } + } + } + } + + protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + fileTable.deleteWithBatch(batch, fileKey); + + RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey); + RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + fileInfo, fileInfo.getUpdateID(), true); + // NOTE: The FSO code seems to write the open key entry with the whole + // path, using the object's names instead of their ID. This would only + // be possible when the file is deleted explicitly, and not part of a + // directory delete. It is also not possible here if the file's parent + // is gone. The name of the key does not matter so just use IDs. 
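+ // The delete from the file table (batched above) and the insert into the deleted table (below) commit in the same batch, so the key always remains in exactly one of the two tables.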
+ deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo); + if (verbose) { + System.out.println("Added entry " + fileKey + " to open key table: " + updatedRepeatedOmKeyInfo); + } + store.commitBatchOperation(batch); + } + } + + protected void markDirectoryForDeletion(String volumeName, String bucketName, + String dirKeyName, OmDirectoryInfo dirInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + directoryTable.deleteWithBatch(batch, dirKeyName); + // HDDS-7592: Make directory entries in deleted dir table unique. + String deleteDirKeyName = dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID(); + + // Convert the directory to OmKeyInfo for deletion. + OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo(volumeName, bucketName, dirInfo, dirInfo.getName()); + deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo); + + store.commitBatchOperation(batch); + } + } + + private Collection getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, OmBucketInfo bucket, + WithObjectID currentDir) throws IOException { + + Collection childDirs = new ArrayList<>(); + + try (TableIterator> + dirIterator = directoryTable.iterator()) { + String dirPrefix = buildReachableKey(volume, bucket, currentDir); + // Start searching the directory table at the current directory's + // prefix to get its immediate children. + dirIterator.seek(dirPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue childDirEntry = dirIterator.next(); + String childDirKey = childDirEntry.getKey(); + // Stop processing once we have seen all immediate children of this + // directory. + if (!childDirKey.startsWith(dirPrefix)) { + break; + } + // This directory was reached by search. + addReachableEntry(volume, bucket, childDirEntry.getValue()); + childDirs.add(childDirKey); + reachableStats.addDir(); + } + } + + return childDirs; + } + + /** + * Add the specified object to the reachable table, indicating it is part + * of the connected FSO tree. + */ + private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException { + String reachableKey = buildReachableKey(volume, bucket, object); + // No value is needed for this table. + reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{}); + } + + /** + * Build an entry in the reachable table for the current object, which + * could be a bucket, file or directory. + */ + private static String buildReachableKey(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) { + return OM_KEY_PREFIX + + volume.getObjectID() + + OM_KEY_PREFIX + + bucket.getObjectID() + + OM_KEY_PREFIX + + object.getObjectID(); + } + + /** + * + * @param fileOrDirKey The key of a file or directory in RocksDB. + * @return true if the entry's parent is in the reachable table. + */ + protected boolean isReachable(String fileOrDirKey) throws IOException { + String reachableParentKey = buildReachableParentKey(fileOrDirKey); + + return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null; + } + + /** + * Build an entry in the reachable table for the current object's parent + * object. The object could be a file or directory. + */ + private static String buildReachableParentKey(String fileOrDirKey) { + String[] keyParts = fileOrDirKey.split(OM_KEY_PREFIX); + // Should be /volID/bucketID/parentID/name + // The first part will be blank since key begins with a slash. 
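+ // For example "/1/2/3/dir1".split("/") yields ["", "1", "2", "3", "dir1"], so parts 1-3 hold the volume, bucket and parent IDs.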
+ Preconditions.assertTrue(keyParts.length >= 4); + String volumeID = keyParts[1]; + String bucketID = keyParts[2]; + String parentID = keyParts[3]; + + return OM_KEY_PREFIX + + volumeID + + OM_KEY_PREFIX + + bucketID + + OM_KEY_PREFIX + + parentID; + } + + private void openReachableDB() throws IOException { + File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); + System.out.println("Creating database of reachable directories at " + reachableDBFile); + // Delete the DB from the last run if it exists. + if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + + ConfigurationSource conf = new OzoneConfiguration(); + reachableDB = DBStoreBuilder.newBuilder(conf) + .setName("reachable.db") + .setPath(reachableDBFile.getParentFile().toPath()) + .addTable(REACHABLE_TABLE) + .build(); + } + + private void closeReachableDB() throws IOException { + if (reachableDB != null) { + reachableDB.close(); + } + File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); + if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + } + + /** + * Define a Report to be created. + */ + public static class Report { + private final ReportStatistics reachable; + private final ReportStatistics unreachable; + private final ReportStatistics unreferenced; + + /** + * Builds one report that is the aggregate of multiple others. + */ + public Report(FSORepairTool.Report... reports) { + reachable = new ReportStatistics(); + unreachable = new ReportStatistics(); + unreferenced = new ReportStatistics(); + + for (FSORepairTool.Report report : reports) { + reachable.add(report.reachable); + unreachable.add(report.unreachable); + unreferenced.add(report.unreferenced); + } + } + + private Report(FSORepairTool.Report.Builder builder) { + this.reachable = builder.reachable; + this.unreachable = builder.unreachable; + this.unreferenced = builder.unreferenced; + } + + public ReportStatistics getReachable() { + return reachable; + } + + public ReportStatistics getUnreachable() { + return unreachable; + } + + public ReportStatistics getUnreferenced() { + return unreferenced; + } + + public String toString() { + return "Reachable:" + reachable + "\nUnreachable:" + unreachable + "\nUnreferenced:" + unreferenced; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + FSORepairTool.Report report = (FSORepairTool.Report) other; + + // Useful for testing. + System.out.println("Comparing reports\nExpect:\n" + this + "\nActual:\n" + report); + + return reachable.equals(report.reachable) && unreachable.equals(report.unreachable) && + unreferenced.equals(report.unreferenced); + } + + @Override + public int hashCode() { + return Objects.hash(reachable, unreachable, unreferenced); + } + + /** + * Builder class for a Report. 
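+ * Statistics that are not set explicitly default to empty counters.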
+ */ + public static final class Builder { + private ReportStatistics reachable = new ReportStatistics(); + private ReportStatistics unreachable = new ReportStatistics(); + private ReportStatistics unreferenced = new ReportStatistics(); + + public Builder() { + } + + public Builder setReachable(ReportStatistics reachable) { + this.reachable = reachable; + return this; + } + + public Builder setUnreachable(ReportStatistics unreachable) { + this.unreachable = unreachable; + return this; + } + + public Builder setUnreferenced(ReportStatistics unreferenced) { + this.unreferenced = unreferenced; + return this; + } + + public Report build() { + return new Report(this); + } + } + } + + /** + * Represents the statistics of reachable and unreachable data. + * This gives the count of dirs, files and bytes. + */ + + public static class ReportStatistics { + private long dirs; + private long files; + private long bytes; + + public ReportStatistics() { } + + public ReportStatistics(long dirs, long files, long bytes) { + this.dirs = dirs; + this.files = files; + this.bytes = bytes; + } + + public void add(ReportStatistics other) { + this.dirs += other.dirs; + this.files += other.files; + this.bytes += other.bytes; + } + + public long getDirs() { + return dirs; + } + + public long getFiles() { + return files; + } + + public long getBytes() { + return bytes; + } + + @Override + public String toString() { + return "\n\tDirectories: " + dirs + + "\n\tFiles: " + files + + "\n\tBytes: " + bytes; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + ReportStatistics stats = (ReportStatistics) other; + + return bytes == stats.bytes && files == stats.files && dirs == stats.dirs; + } + + @Override + public int hashCode() { + return Objects.hash(bytes, files, dirs); + } + + public void addDir() { + dirs++; + } + + public void addFile(long size) { + files++; + bytes += size; + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java new file mode 100644 index 00000000000..56d42d23f49 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.om; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Ozone Repair CLI for OM. 
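+ * The only subcommand registered here is fso-tree (FSORepairCLI). Assuming the standard "ozone repair" wiring, a typical invocation would look like: ozone repair om fso-tree --db /path/to/om.db [--repair].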
+ */ +@CommandLine.Command(name = "om", + subcommands = { + FSORepairCLI.class, + }, + description = "Operational tool to repair OM.") +@MetaInfServices(RepairSubcommand.class) +public class OMRepair implements Callable, RepairSubcommand { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @Override + public Void call() { + GenericCli.missingSubcommand(spec); + return null; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java new file mode 100644 index 00000000000..6ead713e148 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.quota; + +import java.io.IOException; +import java.util.Collection; +import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.om.protocolPB.Hadoop3OmTransportFactory; +import org.apache.hadoop.ozone.om.protocolPB.OmTransport; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; +import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.ratis.protocol.ClientId; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; + +/** + * Ozone Repair CLI for quota. 
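+ * Registers the status and start subcommands; both obtain an OzoneManagerProtocol client via createOmClient() below.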
+ */ +@CommandLine.Command(name = "quota", + subcommands = { + QuotaStatus.class, + QuotaTrigger.class, + }, + description = "Operational tool to repair quota in OM DB.") +@MetaInfServices(RepairSubcommand.class) +public class QuotaRepair implements Callable<Void>, RepairSubcommand { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @CommandLine.ParentCommand + private OzoneRepair parent; + + @Override + public Void call() { + GenericCli.missingSubcommand(spec); + return null; + } + + public OzoneManagerProtocolClientSideTranslatorPB createOmClient( + String omServiceID, + String omHost, + boolean forceHA + ) throws Exception { + OzoneConfiguration conf = parent.getOzoneConf(); + if (omHost != null && !omHost.isEmpty()) { + omServiceID = null; + conf.set(OZONE_OM_ADDRESS_KEY, omHost); + } else if (omServiceID == null || omServiceID.isEmpty()) { + omServiceID = getTheOnlyConfiguredOmServiceIdOrThrow(); + } + RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, + ProtobufRpcEngine.class); + String clientId = ClientId.randomId().toString(); + if (!forceHA || (forceHA && OmUtils.isOmHAServiceId(conf, omServiceID))) { + OmTransport omTransport = new Hadoop3OmTransportFactory() + .createOmTransport(conf, getUser(), omServiceID); + return new OzoneManagerProtocolClientSideTranslatorPB(omTransport, + clientId); + } else { + throw new OzoneClientException("This command works only on an OzoneManager" + + " HA cluster. Service ID specified does not match" + + " with " + OZONE_OM_SERVICE_IDS_KEY + " defined in the " + + "configuration. Configured " + OZONE_OM_SERVICE_IDS_KEY + " are " + + conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY) + "\n"); + } + } + + private String getTheOnlyConfiguredOmServiceIdOrThrow() { + if (getConfiguredServiceIds().size() != 1) { + throw new IllegalArgumentException("No Ozone Manager service ID was specified, " + + "and the configuration defines either zero or more than one service ID, " + + "so no single default could be chosen."); + } + return getConfiguredServiceIds().iterator().next(); + } + + private Collection<String> getConfiguredServiceIds() { + OzoneConfiguration conf = parent.getOzoneConf(); + return conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); + } + + public UserGroupInformation getUser() throws IOException { + return UserGroupInformation.getCurrentUser(); + } + + protected OzoneRepair getParent() { + return parent; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java new file mode 100644 index 00000000000..820ac6f8eaf --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license + * agreements. See the NOTICE file distributed with this work for additional + * information regarding + * copyright ownership. The ASF licenses this file to you under the Apache + * License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a + * copy of the License at + * + *

    http://www.apache.org/licenses/LICENSE-2.0 + * + *

    Unless required by applicable law or agreed to in writing, software + * distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.repair.quota; + +import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import picocli.CommandLine; + +/** + * Tool to get status of last triggered quota repair. + */ +@CommandLine.Command( + name = "status", + description = "CLI to get the status of last trigger quota repair if available.", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class +) +public class QuotaStatus implements Callable { + @CommandLine.Spec + private static CommandLine.Model.CommandSpec spec; + + @CommandLine.Option( + names = {"--service-id", "--om-service-id"}, + description = "Ozone Manager Service ID", + required = false + ) + private String omServiceId; + + @CommandLine.Option( + names = {"--service-host"}, + description = "Ozone Manager Host. If OM HA is enabled, use --service-id instead. " + + "If you must use --service-host with OM HA, this must point directly to the leader OM. " + + "This option is required when --service-id is not provided or when HA is not enabled." + ) + private String omHost; + + @CommandLine.ParentCommand + private QuotaRepair parent; + + @Override + public Void call() throws Exception { + OzoneManagerProtocol ozoneManagerClient = + parent.createOmClient(omServiceId, omHost, false); + System.out.println(ozoneManagerClient.getQuotaRepairStatus()); + return null; + } + + protected QuotaRepair getParent() { + return parent; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java new file mode 100644 index 00000000000..04d78f05dc6 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license + * agreements. See the NOTICE file distributed with this work for additional + * information regarding + * copyright ownership. The ASF licenses this file to you under the Apache + * License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a + * copy of the License at + * + *

    http://www.apache.org/licenses/LICENSE-2.0 + * + *

    Unless required by applicable law or agreed to in writing, software + * distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.repair.quota; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Callable; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import picocli.CommandLine; + +/** + * Tool to trigger quota repair. + */ +@CommandLine.Command( + name = "start", + description = "CLI to trigger quota repair.", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class +) +public class QuotaTrigger implements Callable<Void> { + @CommandLine.Spec + private static CommandLine.Model.CommandSpec spec; + + @CommandLine.ParentCommand + private QuotaRepair parent; + + @CommandLine.Option( + names = {"--service-id", "--om-service-id"}, + description = "Ozone Manager Service ID", + required = false + ) + private String omServiceId; + + @CommandLine.Option( + names = {"--service-host"}, + description = "Ozone Manager Host. If OM HA is enabled, use --service-id instead. " + + "If you must use --service-host with OM HA, this must point directly to the leader OM. " + + "This option is required when --service-id is not provided or when HA is not enabled." + ) + private String omHost; + + @CommandLine.Option(names = {"--buckets"}, + required = false, + description = "Start quota repair for specific buckets. Input is a comma-separated list of bucket URIs" + + " of the form /<volume>/<bucket>[,...]") + private String buckets; + + @Override + public Void call() throws Exception { + List<String> bucketList = Collections.emptyList(); + if (StringUtils.isNotEmpty(buckets)) { + bucketList = Arrays.asList(buckets.split(",")); + } + + OzoneManagerProtocol ozoneManagerClient = + parent.createOmClient(omServiceId, omHost, false); + try { + ozoneManagerClient.startQuotaRepair(bucketList); + System.out.println(ozoneManagerClient.getQuotaRepairStatus()); + } catch (Exception ex) { + System.out.println(ex.getMessage()); + } + return null; + } + + protected QuotaRepair getParent() { + return parent; + } + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java new file mode 100644 index 00000000000..40c0abcb916 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Ozone Quota Repair tools. + */ +package org.apache.hadoop.ozone.repair.quota; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java index ae5edf5b1f7..d1755a68806 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java @@ -69,7 +69,7 @@ protected OzoneAddress getAddress() throws OzoneClientException { } protected abstract void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException; + throws IOException; /** * Checks whether the current command should be executed or not. @@ -102,7 +102,7 @@ public Void call() throws Exception { } protected OzoneClient createClient(OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { return address.createClient(conf); } @@ -111,7 +111,7 @@ protected boolean securityEnabled() { if (!enabled) { err().printf("Error: '%s' operation works only when security is " + "enabled. To enable security set ozone.security.enabled to " + - "true.%n", spec.qualifiedName()); + "true.%n", spec.qualifiedName().trim()); } return enabled; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java index f2fa1a8c4f3..ae5b5ad566e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java @@ -123,7 +123,7 @@ protected OzoneClient createRpcClientFromServiceId( } public OzoneClient createClient(MutableConfigurationSource conf) - throws IOException, OzoneClientException { + throws IOException { OzoneClient client; String scheme = ozoneURI.getScheme(); if (ozoneURI.getScheme() == null || scheme.isEmpty()) { @@ -185,13 +185,12 @@ public OzoneClient createClient(MutableConfigurationSource conf) * @param omServiceID * @return OzoneClient * @throws IOException - * @throws OzoneClientException */ public OzoneClient createClientForS3Commands( OzoneConfiguration conf, String omServiceID ) - throws IOException, OzoneClientException { + throws IOException { Collection serviceIds = conf. 
getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); if (omServiceID != null) { @@ -227,8 +226,7 @@ public OzoneClient createClientForS3Commands( * @param uri - UriString * @return URI */ - protected URI parseURI(String uri) - throws OzoneClientException { + protected URI parseURI(String uri) throws OzoneClientException { if ((uri == null) || uri.isEmpty()) { throw new OzoneClientException( "Ozone URI is needed to execute this command."); @@ -422,7 +420,7 @@ public void ensureVolumeAddress() throws OzoneClientException { } } - public void ensureRootAddress() throws OzoneClientException { + public void ensureRootAddress() throws OzoneClientException { if (keyName.length() != 0 || bucketName.length() != 0 || volumeName.length() != 0) { throw new OzoneClientException( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java new file mode 100644 index 00000000000..20f6f683cbf --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.ratis.shell.cli.sh.RatisShell; + +import picocli.CommandLine; + +/** + * Ozone Ratis Command line tool. + */ +@CommandLine.Command(name = "ozone ratis", + description = "Shell for running Ratis commands", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneRatis extends GenericCli { + + public static void main(String[] argv) throws Exception { + new OzoneRatis().run(argv); + } + + @Override + public int execute(String[] argv) { + TracingUtil.initTracing("shell", createOzoneConfiguration()); + String spanName = "ozone ratis" + String.join(" ", argv); + return TracingUtil.executeInNewSpan(spanName, () -> { + // TODO: When Ozone has RATIS-2155, update this line to use the RatisShell.Builder + // in order to setup TLS and other confs. + final RatisShell shell = new RatisShell(System.out); + return shell.run(argv); + }); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java index 04b2c706f7e..925e3bc13ec 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java @@ -19,6 +19,13 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.ozone.shell.bucket.BucketCommands; +import org.apache.hadoop.ozone.shell.keys.KeyCommands; +import org.apache.hadoop.ozone.shell.prefix.PrefixCommands; +import org.apache.hadoop.ozone.shell.snapshot.SnapshotCommands; +import org.apache.hadoop.ozone.shell.tenant.TenantUserCommands; +import org.apache.hadoop.ozone.shell.token.TokenCommands; +import org.apache.hadoop.ozone.shell.volume.VolumeCommands; import picocli.CommandLine.Command; @@ -27,14 +34,19 @@ */ @Command(name = "ozone sh", description = "Shell for Ozone object store", + subcommands = { + BucketCommands.class, + KeyCommands.class, + PrefixCommands.class, + SnapshotCommands.class, + TenantUserCommands.class, + TokenCommands.class, + VolumeCommands.class, + }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) public class OzoneShell extends Shell { - public OzoneShell() { - super(OzoneShell.class); - } - /** * Main for the ozShell Command handling. * diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/REPL.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/REPL.java new file mode 100644 index 00000000000..14848846348 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/REPL.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell; + +import org.jline.console.SystemRegistry; +import org.jline.console.impl.SystemRegistryImpl; +import org.jline.reader.EndOfFileException; +import org.jline.reader.LineReader; +import org.jline.reader.LineReaderBuilder; +import org.jline.reader.MaskingCallback; +import org.jline.reader.Parser; +import org.jline.reader.UserInterruptException; +import org.jline.reader.impl.DefaultParser; +import org.jline.terminal.Terminal; +import org.jline.terminal.TerminalBuilder; +import org.jline.widget.TailTipWidgets; +import org.jline.widget.TailTipWidgets.TipType; +import picocli.CommandLine; +import picocli.shell.jline3.PicocliCommands; +import picocli.shell.jline3.PicocliCommands.PicocliCommandsFactory; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.function.Supplier; + +/** + * Interactive shell for Ozone commands. + * (REPL = Read-Eval-Print Loop) + */ +class REPL { + + REPL(Shell shell, CommandLine cmd, PicocliCommandsFactory factory) { + Parser parser = new DefaultParser(); + Supplier workDir = () -> Paths.get(System.getProperty("user.dir")); + TerminalBuilder terminalBuilder = TerminalBuilder.builder() + .dumb(true); + try (Terminal terminal = terminalBuilder.build()) { + factory.setTerminal(terminal); + + PicocliCommands picocliCommands = new PicocliCommands(cmd); + picocliCommands.name(shell.name()); + SystemRegistry registry = new SystemRegistryImpl(parser, terminal, workDir, null); + registry.setCommandRegistries(picocliCommands); + registry.register("help", picocliCommands); + + LineReader reader = LineReaderBuilder.builder() + .terminal(terminal) + .completer(registry.completer()) + .parser(parser) + .variable(LineReader.LIST_MAX, 50) + .build(); + + TailTipWidgets widgets = new TailTipWidgets(reader, registry::commandDescription, 5, TipType.COMPLETER); + widgets.enable(); + + String prompt = shell.prompt() + "> "; + + while (true) { + try { + registry.cleanUp(); + String line = reader.readLine(prompt, null, (MaskingCallback) null, null); + registry.execute(line); + } catch (UserInterruptException ignored) { + // ignore + } catch (EndOfFileException e) { + return; + } catch (Exception e) { + registry.trace(e); + } + } + } catch (Exception e) { + shell.printError(e); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java index 97e160651bb..3291ce87b08 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.ozone.om.exceptions.OMException; +import picocli.CommandLine; +import picocli.shell.jline3.PicocliCommands.PicocliCommandsFactory; /** * Ozone user interface commands. 
@@ -27,6 +29,7 @@ * This class uses dispatch method to make calls * to appropriate handlers that execute the ozone functions. */ +@CommandLine.Command public abstract class Shell extends GenericCli { public static final String OZONE_URI_DESCRIPTION = @@ -46,15 +49,48 @@ public abstract class Shell extends GenericCli { "Any unspecified information will be identified from\n" + "the config files.\n"; + private String name; + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @CommandLine.Option(names = { "--interactive" }, description = "Run in interactive mode") + private boolean interactive; + public Shell() { + super(new PicocliCommandsFactory()); + } + + public String name() { + return name; + } + + // override if custom prompt is needed + public String prompt() { + return name(); } - public Shell(Class type) { - super(type); + @Override + public void run(String[] argv) { + name = spec.name(); + + try { + // parse args to check if interactive mode is requested + getCmd().parseArgs(argv); + } catch (Exception ignored) { + // failure will be reported by regular, non-interactive run + } + + if (interactive) { + spec.name(""); // use short name (e.g. "token get" instead of "ozone sh token get") + new REPL(this, getCmd(), (PicocliCommandsFactory) getCmd().getFactory()); + } else { + super.run(argv); + } } @Override - protected void printError(Throwable errorArg) { + public void printError(Throwable errorArg) { OMException omException = null; if (errorArg instanceof OMException) { @@ -77,4 +113,3 @@ protected void printError(Throwable errorArg) { } } } - diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java index 8fbab644c0e..c06d29a7f93 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java @@ -21,7 +21,7 @@ import picocli.CommandLine; /** - * Option for {@link OzoneObj.StoreType}. + * Option for {@link org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType}. 
*/ public class StoreTypeOption implements CommandLine.ITypeConverter { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index 80e26e04451..8a92de696a7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -55,9 +52,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) -public class BucketCommands implements GenericParentCommand, Callable, - SubcommandWithParent { +public class BucketCommands implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -77,9 +72,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java index 39044db797a..9d9bc3dd6e6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java @@ -53,7 +53,7 @@ public void execute(OzoneClient client, OzoneAddress address) */ public static class LinkBucket { private String volumeName; - private String bucketName; + private String name; private String sourceVolume; private String sourceBucket; private Instant creationTime; @@ -63,7 +63,7 @@ public static class LinkBucket { LinkBucket(OzoneBucket ozoneBucket) { this.volumeName = ozoneBucket.getVolumeName(); - this.bucketName = ozoneBucket.getName(); + this.name = ozoneBucket.getName(); this.sourceVolume = ozoneBucket.getSourceVolume(); this.sourceBucket = ozoneBucket.getSourceBucket(); this.creationTime = ozoneBucket.getCreationTime(); @@ -76,8 +76,8 @@ public String getVolumeName() { return volumeName; } - public String getBucketName() { - return bucketName; + public String getName() { + return name; } public String getSourceVolume() { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java index 86a50e9df3c..3df65165fa8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java @@ -19,7 +19,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import 
picocli.CommandLine; @@ -70,7 +69,7 @@ public class SetEncryptionKey extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java index 45d66fd1c3d..258a73aa93b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java @@ -21,7 +21,6 @@ import org.apache.hadoop.ozone.OzoneIllegalArgumentException; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.ShellReplicationOptions; import picocli.CommandLine; @@ -40,7 +39,7 @@ public class SetReplicationConfigHandler extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { ReplicationConfig replicationConfig = replication.fromParams(getConf()) .orElseThrow(() -> new OzoneIllegalArgumentException( "Replication type and config must be specified.")); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java index 7ba62a5ce1c..e36fbce63e8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java @@ -19,7 +19,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine.Command; @@ -40,7 +39,7 @@ public class UpdateBucketHandler extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java index 63b2b425c64..f19548a1fa7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java @@ -19,10 +19,15 @@ package org.apache.hadoop.ozone.shell.checknative; import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.utils.NativeLibraryLoader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils; import org.apache.hadoop.io.erasurecode.ErasureCodeNative; -import org.apache.hadoop.util.NativeCodeLoader; import picocli.CommandLine; +import java.util.Collections; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; 
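The CheckNative changes that follow extend `ozone checknative` to probe the rocks-tools native library in addition to the Hadoop and ISA-L checks. A minimal standalone probe using the same helpers is sketched here; the RocksToolsProbe class name is illustrative, and it assumes the hdds rocks-native tooling is available on the classpath and library path:

import java.util.Collections;
import org.apache.hadoop.hdds.utils.NativeLibraryLoader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;

public class RocksToolsProbe {
  public static void main(String[] args) {
    // Attempt to load the rocks-tools JNI library; false simply means it is absent.
    boolean loaded = NativeLibraryLoader.getInstance().loadLibrary(
        ROCKS_TOOLS_NATIVE_LIBRARY_NAME,
        Collections.singletonList(ManagedRocksObjectUtils.getRocksDBLibFileName()));
    System.out.printf("rocks-tools: %b %s%n", loaded,
        loaded ? NativeLibraryLoader.getJniLibraryFileName() : "");
  }
}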
+ /** * CLI command to check if native libraries are loaded. */ @@ -36,12 +41,12 @@ public static void main(String[] argv) { @Override public Void call() throws Exception { - boolean nativeHadoopLoaded = NativeCodeLoader.isNativeCodeLoaded(); + boolean nativeHadoopLoaded = org.apache.hadoop.util.NativeCodeLoader.isNativeCodeLoaded(); String hadoopLibraryName = ""; String isalDetail = ""; boolean isalLoaded = false; if (nativeHadoopLoaded) { - hadoopLibraryName = NativeCodeLoader.getLibraryName(); + hadoopLibraryName = org.apache.hadoop.util.NativeCodeLoader.getLibraryName(); isalDetail = ErasureCodeNative.getLoadingFailureReason(); if (isalDetail != null) { @@ -50,12 +55,21 @@ public Void call() throws Exception { isalDetail = ErasureCodeNative.getLibraryName(); isalLoaded = true; } - } System.out.println("Native library checking:"); System.out.printf("hadoop: %b %s%n", nativeHadoopLoaded, hadoopLibraryName); System.out.printf("ISA-L: %b %s%n", isalLoaded, isalDetail); + + // Attempt to load the rocks-tools lib + boolean nativeRocksToolsLoaded = NativeLibraryLoader.getInstance().loadLibrary( + ROCKS_TOOLS_NATIVE_LIBRARY_NAME, + Collections.singletonList(ManagedRocksObjectUtils.getRocksDBLibFileName())); + String rocksToolsDetail = ""; + if (nativeRocksToolsLoaded) { + rocksToolsDetail = NativeLibraryLoader.getJniLibraryFileName(); + } + System.out.printf("rocks-tools: %b %s%n", nativeRocksToolsLoaded, rocksToolsDetail); return null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java index 328c56f82e3..41a3e142a8d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java @@ -21,7 +21,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine.Command; @@ -38,7 +37,7 @@ public class CatKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java index b8acf783d6f..6bb25dc2ad8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -49,7 +48,7 @@ public class ChecksumKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException 
{ + throws IOException { ChecksumInfo checksumInfo = new ChecksumInfo(address, client, mode); printObjectAsJson(checksumInfo); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java index 81d2dbcae6a..a304dada153 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.OzoneAddress; @@ -64,7 +63,7 @@ public class CopyKeyHandler extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java index 4c795f1e82b..a67343976e7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java @@ -22,7 +22,6 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -55,7 +54,7 @@ public class DeleteKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java index 501a64238f0..c01f93da9c7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java @@ -61,7 +61,7 @@ public class GetKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java index bbef5841439..f4ac9e1fe8f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import 
org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -55,9 +52,8 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) public class KeyCommands - implements GenericParentCommand, Callable, SubcommandWithParent { + implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -77,9 +73,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java index 00652b58a95..c96e5e2b59e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java @@ -24,7 +24,6 @@ import com.google.common.base.Strings; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.ListOptions; @@ -47,7 +46,7 @@ public class ListKeyHandler extends VolumeBucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { if (!Strings.isNullOrEmpty(address.getBucketName())) { listKeysInsideBucket(client, address); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java index 833f4f7e779..30543f79074 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java @@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -71,11 +70,11 @@ public class PutKeyHandler extends KeyHandler { @Option(names = "--expectedGeneration", description = "Store key only if it already exists and its generation matches the value provided") - private long expectedGeneration; + private Long expectedGeneration; @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); @@ -131,9 +130,14 @@ private void async( private OzoneOutputStream createOrReplaceKey(OzoneBucket bucket, String keyName, long size, Map keyMetadata, 
ReplicationConfig replicationConfig ) throws IOException { - return expectedGeneration > 0 - ? bucket.rewriteKey(keyName, size, expectedGeneration, replicationConfig, keyMetadata) - : bucket.createKey(keyName, size, replicationConfig, keyMetadata); + if (expectedGeneration != null) { + final long existingGeneration = expectedGeneration; + Preconditions.checkArgument(existingGeneration > 0, + "expectedGeneration must be positive, but was %s", existingGeneration); + return bucket.rewriteKey(keyName, size, existingGeneration, replicationConfig, keyMetadata); + } + + return bucket.createKey(keyName, size, replicationConfig, keyMetadata); } private void stream( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java index f71ac094faf..e48f0804967 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java @@ -19,7 +19,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.bucket.BucketHandler; @@ -46,7 +45,7 @@ public class RenameKeyHandler extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java index 35e8da5f381..3dead7a979f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java @@ -21,7 +21,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.MandatoryReplicationOptions; @@ -45,7 +44,7 @@ public class RewriteKeyHandler extends KeyHandler { private MandatoryReplicationOptions replication; @Override - protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { + protected void execute(OzoneClient client, OzoneAddress address) throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); String keyName = address.getKeyName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java index e2d703bbf21..f058c4214d2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import 
org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -45,9 +42,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) -public class PrefixCommands implements GenericParentCommand, Callable, - SubcommandWithParent { +public class PrefixCommands implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -67,9 +62,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java index 91cde308cb2..62b36230fcd 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java @@ -49,7 +49,7 @@ protected OzoneAddress getAddress() throws OzoneClientException { @Override protected OzoneClient createClient(OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { return address.createClientForS3Commands(getConf(), omServiceID); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java index 63b61b1ec66..f7569cac92e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java @@ -20,7 +20,6 @@ import java.io.IOException; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.Handler; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.bucket.BucketUri; @@ -50,7 +49,7 @@ protected OzoneAddress getAddress() { } @Override - protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { + protected void execute(OzoneClient client, OzoneAddress address) throws IOException { String volumeName = snapshotPath.getValue().getVolumeName(); String bucketName = snapshotPath.getValue().getBucketName(); OmUtils.validateSnapshotName(snapshotNewName); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java index 25a3c1c66fe..e4ae7f5ad7a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import 
org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -48,9 +45,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) -public class SnapshotCommands implements GenericParentCommand, Callable, - SubcommandWithParent { +public class SnapshotCommands implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -70,9 +65,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java index a76ab7420af..8800d22e61e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java @@ -47,7 +47,7 @@ protected OzoneAddress getAddress() throws OzoneClientException { @Override protected OzoneClient createClient(OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { return address.createClientForS3Commands(getConf(), omServiceID); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java index 86a051fb76e..baff85d0bf2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java @@ -20,11 +20,8 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; import java.util.concurrent.Callable; @@ -46,9 +43,8 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) public class TenantUserCommands implements - GenericParentCommand, Callable, SubcommandWithParent { + GenericParentCommand, Callable { @CommandLine.ParentCommand private Shell shell; @@ -68,9 +64,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java index 73c9264a807..f76f88b6655 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.shell.token; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine.Command; @@ -34,7 +33,7 @@ public class CancelTokenHandler extends TokenHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { client.getObjectStore().cancelDelegationToken(getToken()); out().printf("Token canceled successfully.%n"); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java index dadba506ae4..133e983dd1c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java @@ -65,7 +65,7 @@ protected boolean isApplicable() { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { Token token = client.getObjectStore() .getDelegationToken(new Text(renewer.getValue())); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java index 581093050ae..8b578b0f172 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.shell.token; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine.Command; @@ -35,7 +34,7 @@ public class RenewTokenHandler extends TokenHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { long expiryTime = client.getObjectStore().renewDelegationToken(getToken()); out().printf("Token renewed successfully, expiry time: %s.%n", Instant.ofEpochMilli(expiryTime)); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java index 5b449c6cc54..3223b5b49ed 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -45,9 +42,8 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) 
public class TokenCommands - implements GenericParentCommand, Callable, SubcommandWithParent { + implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -67,9 +63,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java index 8cc80502386..00270310737 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java @@ -227,11 +227,7 @@ private void doCleanBuckets() throws InterruptedException { // wait until all Buckets are cleaned or exception occurred. while (numberOfBucketsCleaned.get() != totalBucketCount && exception == null) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw e; - } + Thread.sleep(100); } } catch (InterruptedException e) { LOG.error("Failed to wait until all Buckets are cleaned", e); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java index 8c52f0ada95..1cf88552030 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -53,9 +50,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) -public class VolumeCommands implements GenericParentCommand, Callable, - SubcommandWithParent { +public class VolumeCommands implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -75,9 +70,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java new file mode 100644 index 00000000000..bc861bffafe --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.utils; + +import java.util.Map; + +/** + * Represent class which has info of what operation and value a set of records should be filtered with. + */ +public class Filter { + private FilterOperator operator; + private Object value; + private Map nextLevel = null; + + public Filter() { + this.operator = null; + this.value = null; + } + + public Filter(FilterOperator operator, Object value) { + this.operator = operator; + this.value = value; + } + + public Filter(String op, Object value) { + this.operator = getFilterOperator(op); + this.value = value; + } + + public Filter(FilterOperator operator, Object value, Map next) { + this.operator = operator; + this.value = value; + this.nextLevel = next; + } + + public Filter(String op, Object value, Map next) { + this.operator = getFilterOperator(op); + this.value = value; + this.nextLevel = next; + } + + public FilterOperator getOperator() { + return operator; + } + + public void setOperator(FilterOperator operator) { + this.operator = operator; + } + + public Object getValue() { + return value; + } + + public void setValue(Object value) { + this.value = value; + } + + public Map getNextLevel() { + return nextLevel; + } + + public void setNextLevel(Map nextLevel) { + this.nextLevel = nextLevel; + } + + public FilterOperator getFilterOperator(String op) { + if (op.equalsIgnoreCase("equals")) { + return FilterOperator.EQUALS; + } else if (op.equalsIgnoreCase("GREATER")) { + return FilterOperator.GREATER; + } else if (op.equalsIgnoreCase("LESSER")) { + return FilterOperator.LESSER; + } else if (op.equalsIgnoreCase("REGEX")) { + return FilterOperator.REGEX; + } else { + return null; + } + } + + @Override + public String toString() { + return "(" + operator + "," + value + "," + nextLevel + ")"; + } + + /** + * Operation of the filter. + */ + public enum FilterOperator { + EQUALS, + LESSER, + GREATER, + REGEX; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/FormattingCLIUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/FormattingCLIUtils.java new file mode 100644 index 00000000000..050f1b06e7a --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/FormattingCLIUtils.java @@ -0,0 +1,291 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
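The Filter class added above only carries an operator, a value, and an optional nested filter map; how the scanning tools consume it is outside this hunk. A minimal construction sketch, assuming the generic parameters stripped by formatting are Map<String, Filter> (the "dataSize" key is an illustrative field name, not something this hunk defines):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.ozone.utils.Filter;

public class FilterDemo {
  public static void main(String[] args) {
    // Top-level filter requires an exact match on "vol1"; the nested level
    // requires the (hypothetical) dataSize field to be greater than 1024.
    Map<String, Filter> next = new HashMap<>();
    next.put("dataSize", new Filter("GREATER", 1024L));
    Filter filter = new Filter("EQUALS", "vol1", next);
    System.out.println(filter);   // (EQUALS,vol1,{dataSize=(GREATER,1024,null)})
  }
}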
+ */ +package org.apache.hadoop.ozone.utils; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * We define this class to output information in a tabular format, + * making the printed information easier to read. + * + * For example, in OM output: + * If it's in HA mode: + * + * +------------------------------------------------------+ + * | Ozone Manager Roles | + * +---------------------------------+---------+----------+ + * | Host Name | Node ID | Role | + * +---------------------------------+---------+----------+ + * | bigdata-ozone-online32 | om32 | FOLLOWER | + * | bigdata-ozone-online30 | om30 | FOLLOWER | + * | bigdata-ozone-online31 | om31 | LEADER | + * +---------------------------------+---------+----------+ + */ +public final class FormattingCLIUtils { + /** Table title. */ + private String title; + /** Last processed row type. */ + private TableRowType lastTableRowType; + /** StringBuilder object used to concatenate strings. */ + private StringBuilder join; + /** An ordered Map that holds each row of data. */ + private List tableRows; + /** Maps the maximum length of each column. */ + private Map maxColMap; + + /** + * Contains the title constructor. + * @param title titleName + */ + public FormattingCLIUtils(String title) { + this.init(); + this.title = title; + } + + /** + * Initialize the data. + */ + private void init() { + this.join = new StringBuilder(); + this.tableRows = new ArrayList<>(); + this.maxColMap = new HashMap<>(); + } + + /** + * Adds elements from the collection to the header data in the table. + * @param headers Header data + * @return FormattingCLIUtils object + */ + public FormattingCLIUtils addHeaders(List headers) { + return this.appendRows(TableRowType.HEADER, headers.toArray()); + } + + /** + * Adds a row of normal data to the table. + * @param objects Common row data + * @return FormattingCLIUtils object + */ + public FormattingCLIUtils addLine(Object[] objects) { + return this.appendRows(TableRowType.LINE, objects); + } + + /** + * Adds the middle row of data to the table. + * @param tableRowType TableRowType + * @param objects Table row data + * @return FormattingCLIUtils object + */ + private FormattingCLIUtils appendRows(TableRowType tableRowType, Object[] objects) { + if (objects != null && objects.length > 0) { + int len = objects.length; + if (this.maxColMap.size() > len) { + throw new IllegalArgumentException("The number of columns that inserted a row " + + "of data into the table is different from the number of previous columns, check!"); + } + List lines = new ArrayList<>(); + for (int i = 0; i < len; i++) { + Object o = objects[i]; + String value = o == null ? "null" : o.toString(); + lines.add(value); + Integer maxColSize = this.maxColMap.get(i); + if (maxColSize == null) { + this.maxColMap.put(i, value.length()); + continue; + } + if (value.length() > maxColSize) { + this.maxColMap.put(i, value.length()); + } + } + this.tableRows.add(new TableRow(tableRowType, lines)); + } + return this; + } + + /** + * Builds the string for the row of the table title. 
+ */ + private void buildTitle() { + if (this.title != null) { + int maxTitleSize = 0; + for (Integer maxColSize : this.maxColMap.values()) { + maxTitleSize += maxColSize; + } + maxTitleSize += 3 * (this.maxColMap.size() - 1); + if (this.title.length() > maxTitleSize) { + this.title = this.title.substring(0, maxTitleSize); + } + this.join.append("+"); + for (int i = 0; i < maxTitleSize + 2; i++) { + this.join.append("-"); + } + this.join.append("+\n") + .append("|") + .append(StrUtils.center(this.title, maxTitleSize + 2, ' ')) + .append("|\n"); + this.lastTableRowType = TableRowType.TITLE; + } + } + + /** + * Build the table, first build the title, and then walk through each row of data to build. + */ + private void buildTable() { + this.buildTitle(); + for (int i = 0, len = this.tableRows.size(); i < len; i++) { + List data = this.tableRows.get(i).data; + switch (this.tableRows.get(i).tableRowType) { + case HEADER: + if (this.lastTableRowType != TableRowType.HEADER) { + this.buildRowBorder(data); + } + this.buildRowLine(data); + this.buildRowBorder(data); + break; + case LINE: + this.buildRowLine(data); + if (i == len - 1) { + this.buildRowBorder(data); + } + break; + default: + break; + } + } + } + + /** + * Method to build a border row. + * @param data dataLine + */ + private void buildRowBorder(List data) { + this.join.append("+"); + for (int i = 0, len = data.size(); i < len; i++) { + for (int j = 0; j < this.maxColMap.get(i) + 2; j++) { + this.join.append("-"); + } + this.join.append("+"); + } + this.join.append("\n"); + } + + /** + * A way to build rows of data. + * @param data dataLine + */ + private void buildRowLine(List data) { + this.join.append("|"); + for (int i = 0, len = data.size(); i < len; i++) { + this.join.append(StrUtils.center(data.get(i), this.maxColMap.get(i) + 2, ' ')) + .append("|"); + } + this.join.append("\n"); + } + + /** + * Rendering is born as a result. + * @return ASCII string of Table + */ + public String render() { + this.buildTable(); + return this.join.toString(); + } + + /** + * The type of each table row and the entity class of the data. + */ + private static class TableRow { + private TableRowType tableRowType; + private List data; + TableRow(TableRowType tableRowType, List data) { + this.tableRowType = tableRowType; + this.data = data; + } + } + + /** + * An enumeration class that distinguishes between table headers and normal table data. + */ + private enum TableRowType { + TITLE, HEADER, LINE + } + + /** + * String utility class. + */ + private static final class StrUtils { + /** + * Puts a string in the middle of a given size. + * @param str Character string + * @param size Total size + * @param padChar Fill character + * @return String result + */ + private static String center(String str, int size, char padChar) { + if (str != null && size > 0) { + int strLen = str.length(); + int pads = size - strLen; + if (pads > 0) { + str = leftPad(str, strLen + pads / 2, padChar); + str = rightPad(str, size, padChar); + } + } + return str; + } + + /** + * Left-fill the given string and size. + * @param str String + * @param size totalSize + * @param padChar Fill character + * @return String result + */ + private static String leftPad(final String str, int size, char padChar) { + int pads = size - str.length(); + return pads <= 0 ? str : repeat(padChar, pads).concat(str); + } + + /** + * Right-fill the given string and size. 
+ * @param str String + * @param size totalSize + * @param padChar Fill character + * @return String result + */ + private static String rightPad(final String str, int size, char padChar) { + int pads = size - str.length(); + return pads <= 0 ? str : str.concat(repeat(padChar, pads)); + } + + /** + * Re-fill characters as strings. + * @param ch String + * @param repeat Number of repeats + * @return String + */ + private static String repeat(char ch, int repeat) { + char[] buf = new char[repeat]; + for (int i = repeat - 1; i >= 0; i--) { + buf[i] = ch; + } + return new String(buf); + } + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java index 8e291056330..0dc1fde57fa 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/checknative/TestCheckNative.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.checknative; import org.apache.hadoop.ozone.shell.checknative.CheckNative; +import org.apache.ozone.test.tag.Native; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; @@ -27,6 +28,7 @@ import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; @@ -59,6 +61,22 @@ public void testCheckNativeNotLoaded() throws UnsupportedEncodingException { assertThat(stdOut).contains("Native library checking:"); assertThat(stdOut).contains("hadoop: false"); assertThat(stdOut).contains("ISA-L: false"); + assertThat(stdOut).contains("rocks-tools: false"); + } + + @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) + @Test + public void testCheckNativeRocksToolsLoaded() throws UnsupportedEncodingException { + outputStream.reset(); + new CheckNative() + .run(new String[] {}); + // trims multiple spaces + String stdOut = outputStream.toString(DEFAULT_ENCODING) + .replaceAll(" +", " "); + assertThat(stdOut).contains("Native library checking:"); + assertThat(stdOut).contains("hadoop: false"); + assertThat(stdOut).contains("ISA-L: false"); + assertThat(stdOut).contains("rocks-tools: true"); } @AfterEach diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java index 5e259012934..5f0be7859d4 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.debug; +import java.nio.file.Path; import java.nio.file.Paths; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -43,16 +44,13 @@ public class TestDBDefinitionFactory { @Test public void testGetDefinition() { - DBDefinition definition = - DBDefinitionFactory.getDefinition(new OMDBDefinition().getName()); + DBDefinition definition = DBDefinitionFactory.getDefinition(OMDBDefinition.get().getName()); assertInstanceOf(OMDBDefinition.class, definition); - definition = DBDefinitionFactory.getDefinition( - new SCMDBDefinition().getName()); + definition = 
DBDefinitionFactory.getDefinition(SCMDBDefinition.get().getName()); assertInstanceOf(SCMDBDefinition.class, definition); - definition = DBDefinitionFactory.getDefinition( - new ReconSCMDBDefinition().getName()); + definition = DBDefinitionFactory.getDefinition(ReconSCMDBDefinition.get().getName()); assertInstanceOf(ReconSCMDBDefinition.class, definition); definition = DBDefinitionFactory.getDefinition( @@ -62,20 +60,19 @@ public void testGetDefinition() { definition = DBDefinitionFactory.getDefinition( RECON_CONTAINER_KEY_DB + "_1"); assertInstanceOf(ReconDBDefinition.class, definition); + DBDefinitionFactory.setDnDBSchemaVersion("V2"); - definition = - DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), - new OzoneConfiguration()); + final Path dbPath = Paths.get("/tmp/test-container.db"); + final OzoneConfiguration conf = new OzoneConfiguration(); + definition = DBDefinitionFactory.getDefinition(dbPath, conf); assertInstanceOf(DatanodeSchemaTwoDBDefinition.class, definition); + DBDefinitionFactory.setDnDBSchemaVersion("V1"); - definition = - DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), - new OzoneConfiguration()); + definition = DBDefinitionFactory.getDefinition(dbPath, conf); assertInstanceOf(DatanodeSchemaOneDBDefinition.class, definition); + DBDefinitionFactory.setDnDBSchemaVersion("V3"); - definition = - DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), - new OzoneConfiguration()); + definition = DBDefinitionFactory.getDefinition(dbPath, conf); assertInstanceOf(DatanodeSchemaThreeDBDefinition.class, definition); } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index 5b580c81c0e..6d264456682 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -18,16 +18,13 @@ package org.apache.hadoop.ozone.genconf; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.ozone.test.GenericTestUtils; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertNotEquals; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; @@ -40,7 +37,6 @@ import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; import java.net.URL; @@ -54,7 +50,7 @@ * Tests GenerateOzoneRequiredConfigurations. 
*/ public class TestGenerateOzoneRequiredConfigurations { - private static File outputBaseDir; + private static GenerateOzoneRequiredConfigurations genconfTool; private static final Logger LOG = LoggerFactory.getLogger(TestGenerateOzoneRequiredConfigurations.class); @@ -72,8 +68,6 @@ public class TestGenerateOzoneRequiredConfigurations { */ @BeforeAll public static void init() throws Exception { - outputBaseDir = GenericTestUtils.getTestDir(); - FileUtils.forceMkdir(outputBaseDir); genconfTool = new GenerateOzoneRequiredConfigurations(); } @@ -94,14 +88,6 @@ public void reset() { System.setErr(OLD_ERR); } - /** - * Cleans up the output base directory. - */ - @AfterAll - public static void cleanup() throws IOException { - FileUtils.deleteDirectory(outputBaseDir); - } - private void execute(String[] args, String msg) throws UnsupportedEncodingException { List arguments = new ArrayList(Arrays.asList(args)); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java similarity index 99% rename from hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java rename to hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java index a581e1d29d6..8a768d0f696 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.repair; +package org.apache.hadoop.ozone.repair.ldb; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Codec; diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestGetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestGetScmRatisRolesSubcommand.java new file mode 100644 index 00000000000..346b448cc25 --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestGetScmRatisRolesSubcommand.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
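The new test that follows drives GetScmRatisRolesSubcommand with --table and asserts on pipe-delimited rows, i.e. the ASCII-table output style provided by the FormattingCLIUtils helper added earlier in this patch (the subcommand's own implementation is not shown in this hunk, so the exact wiring is presumed). A minimal usage sketch of the helper itself, with illustrative host and role values and assuming the stripped generic on addHeaders is List<String>:

import java.util.Arrays;
import org.apache.hadoop.ozone.utils.FormattingCLIUtils;

public class TableDemo {
  public static void main(String[] args) {
    FormattingCLIUtils table = new FormattingCLIUtils("Ozone Manager Roles");
    table.addHeaders(Arrays.asList("Host Name", "Node ID", "Role"));
    table.addLine(new Object[] {"bigdata-ozone-online31", "om31", "LEADER"});
    table.addLine(new Object[] {"bigdata-ozone-online30", "om30", "FOLLOWER"});
    // render() returns an ASCII table with a centered title row, a header row,
    // and '+'/'-'/'|' borders, as shown in the FormattingCLIUtils javadoc above.
    System.out.println(table.render());
  }
}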
+ */ +package org.apache.hadoop.ozone.scm; + +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.ozone.admin.scm.GetScmRatisRolesSubcommand; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import picocli.CommandLine; + +/** + * This unit test is used to verify whether the output of + * `TestGetScmRatisRolesSubcommand` meets the expected results. + */ +public class TestGetScmRatisRolesSubcommand { + + @Test + public void testGetScmHARatisRoles() throws Exception { + GetScmRatisRolesSubcommand cmd = new GetScmRatisRolesSubcommand(); + ScmClient client = mock(ScmClient.class); + CommandLine c = new CommandLine(cmd); + c.parseArgs("--table"); + + List result = new ArrayList<>(); + result.add("bigdata-ozone-online31:9894:FOLLOWER:61b1c8e5-da40-4567-8a17-96a0234ba14e:100.3.197.98"); + result.add("bigdata-ozone-online32:9894:LEADER:e428ca07-b2a3-4756-bf9b-a4abb033c7d1:100.3.192.89"); + result.add("bigdata-ozone-online30:9894:FOLLOWER:41f90734-b3ee-4284-ad96-40a286654952:100.3.196.51"); + + when(client.getScmRatisRoles()).thenAnswer(invocation -> result); + when(client.isScmRatisEnable()).thenAnswer(invocation -> true); + + try (GenericTestUtils.SystemOutCapturer capture = + new GenericTestUtils.SystemOutCapturer()) { + cmd.execute(client); + assertThat(capture.getOutput()).contains( + "bigdata-ozone-online31 | 9894 | FOLLOWER | 61b1c8e5-da40-4567-8a17-96a0234ba14e"); + assertThat(capture.getOutput()).contains( + "bigdata-ozone-online32 | 9894 | LEADER | e428ca07-b2a3-4756-bf9b-a4abb033c7d1"); + assertThat(capture.getOutput()).contains( + "bigdata-ozone-online30 | 9894 | FOLLOWER | 41f90734-b3ee-4284-ad96-40a286654952"); + } + } + + @Test + public void testGetScmStandAloneRoles() throws Exception { + + GetScmRatisRolesSubcommand cmd = new GetScmRatisRolesSubcommand(); + ScmClient client = mock(ScmClient.class); + CommandLine c = new CommandLine(cmd); + c.parseArgs("--table"); + + List result = new ArrayList<>(); + result.add("bigdata-ozone-online31:9894"); + + when(client.getScmRatisRoles()).thenAnswer(invocation -> result); + when(client.isScmRatisEnable()).thenAnswer(invocation -> false); + + try (GenericTestUtils.SystemOutCapturer capture = + new GenericTestUtils.SystemOutCapturer()) { + cmd.execute(client); + assertThat(capture.getOutput()).contains("| bigdata-ozone-online31 | 9894 |"); + } + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java index 620142c244b..7ebb449bff2 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java @@ -42,7 +42,7 @@ public class TestOzoneAddressClientCreation { @Test - public void implicitNonHA() throws OzoneClientException, IOException { + public void implicitNonHA() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); @@ -51,7 +51,7 @@ public void implicitNonHA() throws OzoneClientException, IOException { @Test public void implicitHAOneServiceId() - throws OzoneClientException, 
IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); address.createClient( @@ -62,7 +62,7 @@ public void implicitHAOneServiceId() @Test public void implicitHaMultipleServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); assertThrows(OzoneClientException.class, () -> @@ -72,7 +72,7 @@ public void implicitHaMultipleServiceId() @Test public void implicitHaMultipleServiceIdWithDefaultServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); InMemoryConfiguration conf = new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, @@ -86,7 +86,7 @@ public void implicitHaMultipleServiceIdWithDefaultServiceId() @Test public void implicitHaMultipleServiceIdWithDefaultServiceIdForS3() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); OzoneConfiguration conf = new OzoneConfiguration(); @@ -100,7 +100,7 @@ public void implicitHaMultipleServiceIdWithDefaultServiceIdForS3() @Test public void explicitHaMultipleServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://service1/vol1/bucket1/key1"); address.createClient( @@ -111,7 +111,7 @@ public void explicitHaMultipleServiceId() } @Test - public void explicitNonHAHostPort() throws OzoneClientException, IOException { + public void explicitNonHAHostPort() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); @@ -122,7 +122,7 @@ public void explicitNonHAHostPort() throws OzoneClientException, IOException { @Test public void explicitHAHostPortWithServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); address.createClient( @@ -134,7 +134,7 @@ public void explicitHAHostPortWithServiceId() @Test public void explicitAHostPortWithServiceIds() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); address.createClient( @@ -146,7 +146,7 @@ public void explicitAHostPortWithServiceIds() } @Test - public void explicitNonHAHost() throws OzoneClientException, IOException { + public void explicitNonHAHost() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om/vol1/bucket1/key1"); address.createClient( @@ -156,7 +156,7 @@ public void explicitNonHAHost() throws OzoneClientException, IOException { } @Test - public void explicitHAHostPort() throws OzoneClientException, IOException { + public void explicitHAHostPort() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:1234/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); @@ -166,7 +166,7 @@ public void explicitHAHostPort() throws OzoneClientException, IOException { } @Test - public void explicitWrongScheme() throws OzoneClientException, IOException { + public void explicitWrongScheme() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("ssh://host/vol1/bucket1/key1"); assertThrows(OzoneClientException.class, () -> diff --git 
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java new file mode 100644 index 00000000000..9c27bedcf7d --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRatis.java @@ -0,0 +1,172 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell; + +import org.apache.ratis.proto.RaftProtos; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.io.InputStream; +import java.io.UnsupportedEncodingException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.charset.StandardCharsets; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests for OzoneRatis. + */ +public class TestOzoneRatis { + private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); + private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); + private final PrintStream originalOut = System.out; + private final PrintStream originalErr = System.err; + private OzoneRatis ozoneRatis; + + @BeforeEach + public void setUp() throws UnsupportedEncodingException { + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + ozoneRatis = new OzoneRatis(); + } + + @AfterEach + public void tearDown() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + /** + * Execute method to invoke the OzoneRatis class and capture output. 
+ * + * @param args command line arguments to pass + * @return the output from OzoneRatis + */ + private String execute(String[] args) throws IOException { + ozoneRatis.execute(args); + return outContent.toString(StandardCharsets.UTF_8.name()); + } + + @Test + public void testBasicOzoneRatisCommand() throws IOException { + String[] args = {""}; + String output = execute(args); + assertTrue(output.contains("Usage: ratis sh [generic options]")); + } + + @Test + public void testLocalRaftMetaConfSubcommand(@TempDir Path tempDir) throws IOException { + // Set up temporary directory and files + Path metadataDir = tempDir.resolve("data/metadata/ratis/test-cluster/current/"); + Files.createDirectories(metadataDir); + + // Create a dummy raft-meta.conf file using protobuf + Path raftMetaConfFile = metadataDir.resolve("raft-meta.conf"); + + // Create a LogEntryProto with a dummy index and peer + RaftProtos.RaftPeerProto raftPeerProto = RaftProtos.RaftPeerProto.newBuilder() + .setId(ByteString.copyFromUtf8("peer1")) + .setAddress("localhost:8000") + .setStartupRole(RaftProtos.RaftPeerRole.FOLLOWER) + .build(); + + RaftProtos.LogEntryProto logEntryProto = RaftProtos.LogEntryProto.newBuilder() + .setConfigurationEntry(RaftProtos.RaftConfigurationProto.newBuilder() + .addPeers(raftPeerProto).build()) + .setIndex(0) + .build(); + + // Write the logEntryProto to the raft-meta.conf file + try (OutputStream out = Files.newOutputStream(raftMetaConfFile)) { + logEntryProto.writeTo(out); + } + + + String[] args = {"local", "raftMetaConf", "-peers", "peer1|localhost:8080", "-path", metadataDir.toString()}; + String output = execute(args); + + assertTrue(output.contains("Index in the original file is: 0")); + assertTrue(output.contains("Generate new LogEntryProto info is:")); + + // Verify that the new raft-meta.conf is generated + Path newRaftMetaConfFile = metadataDir.resolve("new-raft-meta.conf"); + assertTrue(Files.exists(newRaftMetaConfFile), "New raft-meta.conf file should be created."); + + // Verify content of the newly generated file + try (InputStream in = Files.newInputStream(newRaftMetaConfFile)) { + RaftProtos.LogEntryProto newLogEntryProto = RaftProtos.LogEntryProto.parseFrom(in); + assertEquals(1, newLogEntryProto.getIndex()); + RaftProtos.RaftPeerProto peerProto = newLogEntryProto.getConfigurationEntry().getPeers(0); + assertEquals("peer1", peerProto.getId().toStringUtf8()); + assertEquals("localhost:8080", peerProto.getAddress()); + assertEquals(RaftProtos.RaftPeerRole.FOLLOWER, peerProto.getStartupRole()); + } + } + + @Test + public void testMissingRequiredArguments() throws IOException { + String[] args = {"local", "raftMetaConf"}; + String output = execute(args); + assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required options: peers, path")); + } + + @Test + public void testMissingPeerArgument() throws IOException { + String[] args = {"local", "raftMetaConf", "-path", "/path"}; + String output = execute(args); + assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required option: peers")); + } + + @Test + public void testMissingPathArgument() throws IOException { + String[] args = {"local", "raftMetaConf", "-peers", "localhost:8080"}; + String output = execute(args); + assertTrue(output.contains("Failed to parse args for raftMetaConf: Missing required option: path")); + } + + @Test + public void testInvalidPeersFormat() throws IOException { + String[] args = {"local", "raftMetaConf", "-peers", "localhost8080", "-path", "/path"}; + String 
output = execute(args); + assertTrue(output.contains("Failed to parse the server address parameter \"localhost8080\".")); + } + + @Test + public void testDuplicatePeersAddress() throws IOException { + String[] args = {"local", "raftMetaConf", "-peers", "localhost:8080,localhost:8080", "-path", "/path"}; + String output = execute(args); + assertTrue(output.contains("Found duplicated address: localhost:8080.")); + } + + @Test + public void testDuplicatePeersId() throws IOException { + String[] args = {"local", "raftMetaConf", "-peers", "peer1|localhost:8080,peer1|localhost:8081", "-path", "/path"}; + String output = execute(args); + assertTrue(output.contains("Found duplicated ID: peer1.")); + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java index d4fa929614f..3b22573eb13 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -85,7 +84,7 @@ public void tearDown() { @Test public void testChecksumKeyHandler() - throws OzoneClientException, IOException { + throws IOException { OzoneAddress address = new OzoneAddress("o3://ozone1/volume/bucket/key"); long keySize = 1024L; diff --git a/pom.xml b/pom.xml index b5a6323bed9..8cf055522b2 100644 --- a/pom.xml +++ b/pom.xml @@ -12,413 +12,357 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone ozone-main - 1.5.0-SNAPSHOT - Apache Ozone Main - Apache Ozone Main + 2.0.0-SNAPSHOT pom + Apache Ozone Main + Apache Ozone Main + dev-support hadoop-hdds hadoop-ozone - - - ${distMgmtStagingId} - ${distMgmtStagingName} - ${distMgmtStagingUrl} - - - ${distMgmtSnapshotsId} - ${distMgmtSnapshotsName} - ${distMgmtSnapshotsUrl} - - - - - - ${distMgmtSnapshotsId} - ${distMgmtSnapshotsName} - ${distMgmtSnapshotsUrl} - - - - - - Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - - - - - Apache Software Foundation - http://www.apache.org - - - 2.10.2 - 3.3.6 - - - ${ozone.version} - 1.5.0-SNAPSHOT - Indiana Dunes - ${hdds.version} - ${ozone.version} - - - ${hdds.version} - - 3.1.0 - - - 1.0.6 - - - 2.3.0 - - apache.snapshots.https - Apache Development Snapshot Repository - https://repository.apache.org/content/repositories/snapshots - apache.staging.https - Apache Release Distribution Repository - https://repository.apache.org/service/local/staging/deploy/maven2 - - - UTF-8 - UTF-8 - - 3.2.4 - bash - - false - false - true - 9.4.55.v20240627 - 5.2.0 - 1.0-1 - 4.2.0 - - - 4 - + 1.2.2 + 1.3.2 + 1.0 + 0.16.1 + 1.14 + 1.9.7 + 3.26.3 + 1.12.661 + 0.8.0.RELEASE + 1.79 + 3.6.0 + 2.0 + 9.3 + true + 3.0.2 + 9.40 1.9.4 1.8.0 - 1.17.0 + 1.17.1 3.2.2 - 1.27.0 - 2.10.1 - 1.5.6-4 + 1.27.1 + 2.11.0 1.4.0 - 2.16.1 - 3.14.0 - 1.2 + 1.5 + 2.18.0 + 3.17.0 1.1 + 1.2 3.6.1 3.11.1 1.12.0 1.6 - 1.5 - 1.9.0 - - ${project.build.directory}/test-dir - ${test.build.dir} - - file:///dev/urandom - - 1.3.2 - 3.26.3 - 0.8.0.RELEASE - 1.78.1 + 0.21.0 + 4.2.0 + 2.9.1 + + ${hadoop.version} + ${hdds.version} + ${ozone.version} 10.14.2.0 - 3.0.2 - 2.8.0 + 3.4.4 + apache.snapshots.https + Apache Development Snapshot Repository + https://repository.apache.org/content/repositories/snapshots + apache.staging.https + Apache Release Distribution Repository + https://repository.apache.org/service/local/staging/deploy/maven2 + 3.6.1 + 0.45.1 + 1.9.0 3.2.6 + + + [${javac.version},) + [3.3.0,) + 2.29.2 + + native | unhealthy + 3.5.0 + false + 1.15.1 + 3.19.6 + 2.10.1 + 32.1.3-jre + 6.0.0 + 1.3.0 + 3.4.1 + 2.10.2 + 2.2 + ${hdds.version} + ${ozone.version} + 2.6.1 + 4.5.14 + 4.4.16 + 1.58.0 + 1.9.13 + 1.9.13 + 2.16.2 0.8.12 + 1.8.1 + 2.1.1 + 2.6.1 + 2.0.2 + 2.1.6 + 5.2.0 + file:///dev/urandom + 8 3.30.2-GA - 1.2.2 + 1 2.3.3 2.3.9 + 1.0-1 + 1.19.4 + 2.45 + 9.4.56.v20240826 + 1.4.0 + 3.9.12 + 3.28.0 0.10.4 - 3.1.19 + 3.1.20 + 2.12.7 + 3.11.10 0.1.55 - 2.0 - 3.1.0 2.1 1.1.1 - - - 1.19.4 - 2.43 - - - 1.9.13 - 1.9.13 - 2.16.2 - 5.4.0 - - - 1.8.1 - 0.33.0 - - 2.6.1 - - - 4.5.14 - 4.4.16 - - - 2.0.16 - 2.23.1 - 3.4.4 - 1.2.25 - + 5.11.3 1.0.1 1.9.25 + 2.5.0 + 2.24.2 + 1.0-beta-1 + 3.1.0 + 3.7.1 + 3.6.0 + 3.4.0 + 3.9.0 + 3.8.1 + 3.1.3 + 3.5.0 + 3.2.7 + 3.1.3 + 3.4.2 + 3.11.1 + 1.6.1 + 1.7.0 + 3.3.0 + 3.6.0 + 3.21.0 + 3.3.1 + -Xmx8192m -XX:+HeapDumpOnOutOfMemoryError + + 3.0.0-M4 + ${maven-surefire-plugin.version} + 3.4.0 + 3.9.9 + true 1.11 - 4.7.6 + 4.11.0 + 1.0-M1 + + + + 4.1.109.Final + + 16.14.2 + 4.12.0 + 0.33.0 + 1.7.1 + Indiana Dunes + 2.0.0-SNAPSHOT + 4.7.5 + 4.2.2 + + 2023-01-01T00:00:00Z + + UTF-8 + UTF-8 0.16.0 - 0.10.2 - - - 1.7 - + 1.2.1 + 1.0.7 2.5.0 - 3.7.1 - 1.1.1 - - 3.1.12.2 - 2.1.9 - 4.12.0 - 4.2.2 - 2.6.1 - 2.1.1 - 2.0.2 - 2.1.6 - 1 - 2.12.7 - - 1.0 - 0.21.0 - 2.29.2 - 32.1.3-jre - 6.0.0 - 2.10.1 - - 2.7.5 - 3.6.0 - 4.11.0 - 2.2 - 5.10.3 - 3.8.4 - + 3.23.4 0.6.1 - 3.19.6 - 1.7.1 - - - - 4.1.109.Final - 1.58.0 - + 2.3.0 + 1.0.6 + 3.1.2 + 1.7 + 0.10.2 + 1.2.25 + 2.6.0 7.7.3 - 
3.46.0.0 - 3.1.9.Final - - - 8 - - - - [${javac.version},) - [3.3.0,) - - - -Xmx8192m -XX:+HeapDumpOnOutOfMemoryError - - native | unhealthy - flaky | native | slow | unhealthy - 3.0.0-M4 - ${maven-surefire-plugin.version} - - 3.4.0 - 3.9.0 - 3.1.2 - 3.1.0 - 3.6.0 - 3.4.2 - 3.4.0 - 3.3.1 - 1.6.1 - 1.7.0 - 3.5.0 - 3.7.0 - 3.7.1 - 0.16.1 - 3.1.2 - 3.6.0 - 3.7.1 - 4.2.2 - 0.45.0 - 3.4.1 - 2.4.0 - 1.0-beta-1 - 1.0-M1 - 3.4.0 - 3.12.1 - 3.1.0 - 9.3 - 1200 - 1.12.661 - 1.15.0 - - ${hadoop.version} - 1.0.7 - - 1.5.4 + 3.1.0 + bash + 2.0.16 2.0 + 1.1.10.7 ${basedir}/target/classes - - 1.9.7 - 1.15.0 - 2.5.0 - 1.4.0 - 3.9.8.1 - + 3.0.1 + 3.1.12.2 5.3.39 - 3.11.10 - - - 16.14.2 - + 3.47.1.0 + 4.2.2 + false + 1200 + 1.5.4 + ${test.build.dir} + ${project.build.directory}/test-dir + + 4 + + flaky | native | slow | unhealthy 5.1.0 - - 1.2.1 - 3.9.8 - 1.1.10.5 - 1.2.0 - 9.40 + 3.1.9.Final + 5.4.0 + 3.8.4 + 1.5.6-8 - com.squareup.okio - okio - ${okio.version} - - - com.squareup.okhttp - okhttp - ${okhttp.version} + com.fasterxml.jackson + jackson-bom + ${jackson2-bom.version} + pom + import - info.picocli - picocli - ${picocli.version} + io.grpc + grpc-bom + ${io.grpc.version} + pom + import - org.apache.derby - derby - ${derby.version} + io.netty + netty-bom + ${netty.version} + pom + import - org.apache.hadoop - hadoop-assemblies - ${hadoop.version} + org.jetbrains.kotlin + kotlin-bom + ${kotlin.version} + pom + import - org.apache.hadoop - hadoop-annotations - ${hadoop.version} + org.junit + junit-bom + ${junit5.version} + pom + import + - org.apache.hadoop - hadoop-common - ${hadoop.version} + aopalliance + aopalliance + ${aopalliance.version} - org.apache.hadoop - hadoop-common - ${hadoop.version} - test-jar + ch.qos.reload4j + reload4j + ${reload4j.version} + + + com.sun.jdmk + jmxtools + + + com.sun.jmx + jmxri + + + javax.jms + jms + + + javax.jms + jmx + + + javax.mail + mail + + - org.apache.hadoop - hadoop-auth - ${hadoop.version} + com.amazonaws + aws-java-sdk-core + ${aws-java-sdk.version} - org.apache.hadoop - hadoop-auth - ${hadoop.version} - test-jar + com.amazonaws + aws-java-sdk-s3 + ${aws-java-sdk.version} - org.apache.hadoop - hadoop-hdfs - ${hadoop.version} + com.bettercloud + vault-java-driver + ${vault.driver.version} - org.apache.hadoop - hadoop-hdfs-client - ${hadoop.version} + com.codahale.metrics + metrics-core + ${codahale-metrics.version} - org.apache.hadoop - hadoop-mapreduce-client-jobclient - ${hadoop.version} - test + com.fasterxml.woodstox + woodstox-core + ${woodstox.version} - org.apache.hadoop - hadoop-distcp - ${hadoop.version} + com.github.jnr + jnr-constants + ${jnr-constants.version} - org.apache.hadoop - hadoop-distcp - ${hadoop.version} - test-jar + com.github.jnr + jnr-posix + ${jnr-posix.version} - org.apache.hadoop - hadoop-client - ${hadoop.version} + com.github.luben + zstd-jni + ${zstd-jni.version} - org.apache.hadoop - hadoop-minikdc - ${hadoop.version} + com.github.stephenc.jcip + jcip-annotations + ${jcip-annotations.version} - org.apache.hadoop - hadoop-kms - ${hadoop.version} + com.github.vlsi.mxgraph + jgraphx + ${jgraphx.version} - org.apache.hadoop - hadoop-kms - ${hadoop.version} - test-jar + com.google.code.gson + gson + ${gson.version} - aopalliance - aopalliance - ${aopalliance.version} + com.google.errorprone + error_prone_annotations + ${errorprone-annotations.version} + true com.google.guava @@ -432,60 +376,124 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - com.google.code.gson - gson - ${gson.version} + 
com.google.inject + guice + ${guice.version} - commons-cli - commons-cli - ${commons-cli.version} + com.google.inject.extensions + guice-assistedinject + ${guice.version} - org.apache.commons - commons-math3 - ${commons-math3.version} + com.google.inject.extensions + guice-servlet + ${guice.version} - org.apache.commons - commons-compress - ${commons-compress.version} + com.google.protobuf + protobuf-java + ${proto2.hadooprpc.protobuf.version} - org.apache.httpcomponents - httpclient - ${httpclient.version} + com.google.re2j + re2j + ${re2j.version} - org.apache.httpcomponents - httpcore - ${httpcore.version} + com.google.testing.compile + compile-testing + ${compile-testing.version} - org.apache.httpcomponents - httpcore-nio - ${httpcore.version} + com.jcraft + jsch + ${jsch.version} - org.apache.kerby - kerb-core - ${kerby.version} + com.jolbox + bonecp + ${bonecp.version} - org.apache.kerby - kerb-util - ${kerby.version} + com.lmax + disruptor + ${disruptor.version} - org.apache.kerby - kerby-util - ${kerby.version} + com.nimbusds + nimbus-jose-jwt + ${com.nimbusds.nimbus-jose-jwt.version} + + + com.squareup.okhttp3 + okhttp + ${okhttp3.version} + + + com.sun.jersey + jersey-client + ${jersey.version} + + + com.sun.jersey + jersey-core + ${jersey.version} + + + commons-beanutils + commons-beanutils + ${commons-beanutils.version} + + + commons-cli + commons-cli + ${commons-cli.version} commons-codec commons-codec ${commons-codec.version} + + commons-collections + commons-collections + ${commons-collections.version} + + + commons-daemon + commons-daemon + ${commons-daemon.version} + + + commons-io + commons-io + ${commons-io.version} + + + commons-logging + commons-logging + ${commons-logging.version} + + + avalon-framework + avalon-framework + + + javax.servlet + javax.servlet-api + + + logkit + logkit + + + + + commons-logging + commons-logging-api + ${commons-logging-api.version} + commons-net commons-net @@ -497,95 +505,133 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${commons-validator.version} - com.github.jnr - jnr-constants - ${jnr-constants.version} + dnsjava + dnsjava + ${dnsjava.version} - com.github.jnr - jnr-posix - ${jnr-posix.version} + info.picocli + picocli + ${picocli.version} - com.github.luben - zstd-jni - ${zstd-jni.version} + info.picocli + picocli-shell-jline3 + ${picocli.version} - com.github.stephenc.jcip - jcip-annotations - ${jcip-annotations.version} + io.dropwizard.metrics + metrics-core + ${dropwizard-metrics.version} - javax.annotation - javax.annotation-api - ${annotation-api.version} + io.jaegertracing + jaeger-client + ${jaeger.version} + + + org.apache.tomcat.embed + tomcat-embed-core + + + org.jetbrains.kotlin + kotlin-stdlib + + - javax.enterprise - cdi-api - ${cdi-api.version} + io.jaegertracing + jaeger-core + ${jaeger.version} - javax.inject - javax.inject - ${javax.inject.version} + io.opentracing + opentracing-api + ${opentracing.version} - javax.servlet - javax.servlet-api - ${servlet-api.version} + io.opentracing + opentracing-noop + ${opentracing.version} - javax.ws.rs - jsr311-api - ${jsr311-api.version} + io.opentracing + opentracing-util + ${opentracing.version} - org.eclipse.jetty - jetty-server - ${jetty.version} + io.prometheus + simpleclient + ${prometheus.version} - org.eclipse.jetty - javax.servlet-api + io.prometheus + simpleclient_tracer_common + + + io.prometheus + simpleclient_tracer_otel + + + io.prometheus + simpleclient_tracer_otel_agent - org.eclipse.jetty - jetty-util - ${jetty.version} 
+ io.prometheus + simpleclient_common + ${prometheus.version} + + + io.prometheus + simpleclient_dropwizard + ${prometheus.version} + + + jakarta.activation + jakarta.activation-api + ${activation-api.version} + + + jakarta.annotation + jakarta.annotation-api + ${jakarta.annotation.version} + + + jakarta.validation + jakarta.validation-api + ${jakarta.validation.version} - org.eclipse.jetty - jetty-servlet - ${jetty.version} + jakarta.ws.rs + jakarta.ws.rs-api + ${jakarta.ws.rs-api.version} - org.eclipse.jetty - jetty-webapp - ${jetty.version} + jakarta.xml.bind + jakarta.xml.bind-api + ${jaxb-api.version} - org.eclipse.jetty - jetty-util-ajax - ${jetty.version} + javax.annotation + javax.annotation-api + ${annotation-api.version} - org.eclipse.jetty - jetty-http - ${jetty.version} + javax.enterprise + cdi-api + ${cdi-api.version} - org.eclipse.jetty - jetty-io - ${jetty.version} + javax.inject + javax.inject + ${javax.inject.version} - org.eclipse.jetty - jetty-client - ${jetty.version} + javax.servlet + javax.servlet-api + ${servlet-api.version} javax.servlet.jsp @@ -593,235 +639,189 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${jsp-api.version} - org.glassfish.hk2 - guice-bridge - ${hk2.version} + javax.ws.rs + jsr311-api + ${jsr311-api.version} - org.glassfish.hk2 - hk2-api - ${hk2.version} + joda-time + joda-time + ${joda.time.version} - org.glassfish.jersey.containers - jersey-container-servlet - ${jersey2.version} + net.java.dev.jna + jna + ${java.dev.jna.version} + + + net.java.dev.jna + jna-platform + ${java.dev.jna.version} + + + org.apache.commons + commons-compress + ${commons-compress.version} + + + org.apache.commons + commons-configuration2 + ${commons-configuration2.version} - org.glassfish.hk2 - hk2-api + org.apache.commons + commons-lang3 - compile - - - org.glassfish.jersey.containers - jersey-container-servlet-core - ${jersey2.version} - org.glassfish.jersey.core - jersey-common - ${jersey2.version} + org.apache.commons + commons-lang3 + ${commons-lang3.version} - org.glassfish.jersey.core - jersey-server - ${jersey2.version} + org.apache.commons + commons-math3 + ${commons-math3.version} - org.glassfish.jersey.ext.cdi - jersey-cdi1x - ${jersey2.version} + org.apache.commons + commons-text + ${commons-text.version} - org.glassfish.jersey.inject - jersey-hk2 - ${jersey2.version} - - - org.glassfish.hk2 - hk2-api - - - org.glassfish.hk2 - hk2-utils - - - org.glassfish.hk2.external - aopalliance-repackaged - - + org.apache.curator + curator-client + ${curator.version} - org.glassfish.jersey.media - jersey-media-json-jackson - ${jersey2.version} + org.apache.curator + curator-framework + ${curator.version} - org.glassfish.jersey.media - jersey-media-jaxb - ${jersey2.version} + org.apache.derby + derby + ${derby.version} - com.sun.jersey - jersey-core - ${jersey.version} + org.apache.hadoop + hadoop-annotations + ${hadoop.version} - - com.google.errorprone - error_prone_annotations - ${errorprone-annotations.version} - true + org.apache.hadoop + hadoop-assemblies + ${hadoop.version} - - com.google.inject - guice - ${guice.version} + org.apache.hadoop + hadoop-auth + ${hadoop.version} - com.google.inject.extensions - guice-assistedinject - ${guice.version} + org.apache.hadoop + hadoop-auth + ${hadoop.version} + test-jar - com.google.inject.extensions - guice-servlet - ${guice.version} + org.apache.hadoop + hadoop-client + ${hadoop.version} - com.jolbox - bonecp - ${bonecp.version} + org.apache.hadoop + hadoop-cloud-storage + 
${hadoop.version} - - org.apache.ratis - ratis-thirdparty-misc - ${ratis.thirdparty.version} + org.apache.hadoop + hadoop-common + ${hadoop.version} - org.apache.ratis - ratis-proto-shaded - ${ratis.version} + org.apache.hadoop + hadoop-common + ${hadoop.version} + test-jar - org.apache.ratis - ratis-client - ${ratis.version} + org.apache.hadoop + hadoop-distcp + ${hadoop.version} - org.apache.ratis - ratis-server-api - ${ratis.version} + org.apache.hadoop + hadoop-distcp + ${hadoop.version} + test-jar - ratis-server - org.apache.ratis - ${ratis.version} + org.apache.hadoop + hadoop-hdfs + ${hadoop.version} - org.apache.ratis - ratis-metrics-api - ${ratis.version} + org.apache.hadoop + hadoop-hdfs-client + ${hadoop.version} - ratis-metrics-dropwizard3 - org.apache.ratis - ${ratis.version} + org.apache.hadoop + hadoop-kms + ${hadoop.version} - ratis-netty - org.apache.ratis - ${ratis.version} + org.apache.hadoop + hadoop-kms + ${hadoop.version} + test-jar - ratis-grpc - org.apache.ratis - ${ratis.version} + org.apache.hadoop + hadoop-minikdc + ${hadoop.version} - org.apache.ratis - ratis-proto - ${ratis.version} + org.apache.hadoop + hadoop-sls + ${hadoop.version} - org.apache.ratis - ratis-tools - ${ratis.version} + org.apache.hadoop.thirdparty + hadoop-shaded-protobuf_3_25 + ${hadoop-thirdparty.version} - org.apache.ratis - ratis-common - ${ratis.version} + org.apache.httpcomponents + httpclient + ${httpclient.version} - - io.netty - netty-bom - ${netty.version} - pom - import + org.apache.httpcomponents + httpcore + ${httpcore.version} - - commons-io - commons-io - ${commons-io.version} + org.apache.httpcomponents + httpcore-nio + ${httpcore.version} - - commons-logging - commons-logging - ${commons-logging.version} - - - avalon-framework - avalon-framework - - - logkit - logkit - - - javax.servlet - javax.servlet-api - - + org.apache.kerby + kerb-core + ${kerby.version} - commons-logging - commons-logging-api - ${commons-logging-api.version} + org.apache.kerby + kerb-util + ${kerby.version} - ch.qos.reload4j - reload4j - ${reload4j.version} - - - com.sun.jdmk - jmxtools - - - com.sun.jmx - jmxri - - - javax.mail - mail - - - javax.jms - jmx - - - javax.jms - jms - - + org.apache.kerby + kerby-util + ${kerby.version} org.apache.logging.log4j @@ -834,462 +834,377 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${log4j2.version} - com.codahale.metrics - metrics-core - ${codahale-metrics.version} - - - com.lmax - disruptor - ${disruptor.version} - - - com.amazonaws - aws-java-sdk-core - ${aws-java-sdk.version} - - - com.amazonaws - aws-java-sdk-s3 - ${aws-java-sdk.version} + org.apache.ratis + ratis-client + ${ratis.version} - org.hamcrest - hamcrest - ${hamcrest.version} + org.apache.ratis + ratis-common + ${ratis.version} - org.jacoco - org.jacoco.core - provided - ${jacoco.version} + org.apache.ratis + ratis-grpc + ${ratis.version} - org.javassist - javassist - ${javassist.version} + org.apache.ratis + ratis-metrics-api + ${ratis.version} - org.jooq - jooq - ${jooq.version} - - - javax.xml.bind - jaxb-api - - + org.apache.ratis + ratis-metrics-dropwizard3 + ${ratis.version} - org.jooq - jooq-codegen - ${jooq.version} + org.apache.ratis + ratis-netty + ${ratis.version} - org.jooq - jooq-meta - ${jooq.version} + org.apache.ratis + ratis-proto + ${ratis.version} - org.junit - junit-bom - ${junit5.version} - pom - import + org.apache.ratis + ratis-proto-shaded + ${ratis.version} - commons-collections - commons-collections - ${commons-collections.version} + 
org.apache.ratis + ratis-server + ${ratis.version} - commons-beanutils - commons-beanutils - ${commons-beanutils.version} + org.apache.ratis + ratis-server-api + ${ratis.version} - org.apache.commons - commons-configuration2 - ${commons-configuration2.version} + org.apache.ratis + ratis-shell + ${ratis.version} - org.apache.commons - commons-lang3 + org.slf4j + * - org.apache.commons - commons-lang3 - ${commons-lang3.version} + org.apache.ratis + ratis-thirdparty-misc + ${ratis.thirdparty.version} - org.apache.commons - commons-text - ${commons-text.version} + org.apache.ratis + ratis-tools + ${ratis.version} org.apache.zookeeper zookeeper ${zookeeper.version} - - ch.qos.logback - logback-core - ch.qos.logback logback-classic - - - - org.slf4j - slf4j-api - ${slf4j.version} - - - org.slf4j - slf4j-reload4j - ${slf4j.version} - - - org.slf4j - jul-to-slf4j - ${slf4j.version} - - - org.springframework - spring-core - ${spring.version} - - org.springframework - spring-jcl + ch.qos.logback + logback-core - org.springframework - spring-jdbc - ${spring.version} - - - org.springframework - spring-tx - ${spring.version} - - - com.fasterxml.woodstox - woodstox-core - ${woodstox.version} - - - com.fasterxml.jackson - jackson-bom - ${jackson2-bom.version} - pom - import - - - org.mockito - mockito-core - ${mockito.version} - - - org.mockito - mockito-junit-jupiter - ${mockito.version} - - - com.google.testing.compile - compile-testing - ${compile-testing.version} - - - com.google.re2j - re2j - ${re2j.version} + org.assertj + assertj-core + ${assertj.version} - com.google.protobuf - protobuf-java - ${proto2.hadooprpc.protobuf.version} + org.bouncycastle + bcpkix-jdk18on + ${bouncycastle.version} - org.apache.hadoop.thirdparty - hadoop-shaded-protobuf_3_7 - ${hadoop-thirdparty.version} + org.bouncycastle + bcprov-jdk18on + ${bouncycastle.version} - commons-daemon - commons-daemon - ${commons-daemon.version} + org.bouncycastle + bcutil-jdk18on + ${bouncycastle.version} - com.jcraft - jsch - ${jsch.version} + org.codehaus.woodstox + stax2-api + ${stax2.version} - - org.kohsuke.metainf-services - metainf-services - ${metainf-services.version} - true + org.eclipse.jetty + jetty-client + ${jetty.version} - io.dropwizard.metrics - metrics-core - ${dropwizard-metrics.version} + org.eclipse.jetty + jetty-http + ${jetty.version} - io.grpc - grpc-bom - ${io.grpc.version} - pom - import + org.eclipse.jetty + jetty-io + ${jetty.version} - io.jaegertracing - jaeger-client - ${jaeger.version} + org.eclipse.jetty + jetty-server + ${jetty.version} - org.apache.tomcat.embed - tomcat-embed-core - - - org.jetbrains.kotlin - kotlin-stdlib + org.eclipse.jetty + javax.servlet-api - io.jaegertracing - jaeger-core - ${jaeger.version} + org.eclipse.jetty + jetty-servlet + ${jetty.version} - org.jetbrains.kotlin - kotlin-bom - ${kotlin.version} - pom - import + org.eclipse.jetty + jetty-util + ${jetty.version} - io.opentracing - opentracing-api - ${opentracing.version} + org.eclipse.jetty + jetty-util-ajax + ${jetty.version} - io.opentracing - opentracing-noop - ${opentracing.version} + org.eclipse.jetty + jetty-webapp + ${jetty.version} - io.opentracing - opentracing-util - ${opentracing.version} + org.glassfish.hk2 + guice-bridge + ${hk2.version} - io.prometheus - simpleclient_dropwizard - ${prometheus.version} + org.glassfish.hk2 + hk2-api + ${hk2.version} - io.prometheus - simpleclient - ${prometheus.version} + org.glassfish.hk2.external + jakarta.inject + ${jakarta.inject.version} + + + org.glassfish.jaxb + jaxb-runtime + 
${jaxb-runtime.version} - io.prometheus - simpleclient_tracer_common - - - io.prometheus - simpleclient_tracer_otel + javax.xml.bind + jaxb-api + + + + org.glassfish.jersey.containers + jersey-container-servlet + ${jersey2.version} + compile + - io.prometheus - simpleclient_tracer_otel_agent + org.glassfish.hk2 + hk2-api - io.prometheus - simpleclient_common - ${prometheus.version} + org.glassfish.jersey.containers + jersey-container-servlet-core + ${jersey2.version} - org.apache.hadoop - hadoop-sls - ${hadoop.version} + org.glassfish.jersey.core + jersey-common + ${jersey2.version} - org.apache.hadoop - hadoop-cloud-storage - ${hadoop.version} + org.glassfish.jersey.core + jersey-server + ${jersey2.version} - jakarta.xml.bind - jakarta.xml.bind-api - ${jaxb-api.version} + org.glassfish.jersey.ext.cdi + jersey-cdi1x + ${jersey2.version} - org.glassfish.jaxb - jaxb-runtime - ${jaxb-runtime.version} + org.glassfish.jersey.inject + jersey-hk2 + ${jersey2.version} - javax.xml.bind - jaxb-api + org.glassfish.hk2 + hk2-api + + + org.glassfish.hk2 + hk2-utils + + + org.glassfish.hk2.external + aopalliance-repackaged - com.sun.jersey - jersey-client - ${jersey.version} + org.glassfish.jersey.media + jersey-media-jaxb + ${jersey2.version} - - org.bouncycastle - bcprov-jdk18on - ${bouncycastle.version} + org.glassfish.jersey.media + jersey-media-json-jackson + ${jersey2.version} - org.bouncycastle - bcpkix-jdk18on - ${bouncycastle.version} + org.hamcrest + hamcrest + ${hamcrest.version} - org.bouncycastle - bcutil-jdk18on - ${bouncycastle.version} + org.javassist + javassist + ${javassist.version} - - dnsjava - dnsjava - ${dnsjava.version} + org.jboss.weld.servlet + weld-servlet-shaded + ${weld-servlet.version} - - org.yaml - snakeyaml - ${snakeyaml.version} + org.jgrapht + jgrapht-core + ${jgrapht.version} - org.assertj - assertj-core - ${assertj.version} + org.jgrapht + jgrapht-ext + ${jgrapht.version} - org.jboss.weld.servlet - weld-servlet-shaded - ${weld-servlet.version} + org.jline + jline + ${jline.version} - org.reflections - reflections - ${reflections.version} + org.jooq + jooq + ${jooq.version} - com.google.code.findbugs - jsr305 + javax.xml.bind + jaxb-api - org.rocksdb - rocksdbjni - ${rocksdb.version} - - - org.xerial - sqlite-jdbc - ${sqlite.version} - - - jakarta.activation - jakarta.activation-api - ${activation-api.version} - - - com.squareup.okhttp3 - okhttp - ${okhttp3.version} + org.jooq + jooq-codegen + ${jooq.version} - org.codehaus.woodstox - stax2-api - ${stax2.version} + org.jooq + jooq-meta + ${jooq.version} - net.java.dev.jna - jna - ${java.dev.jna.version} + org.kohsuke.metainf-services + metainf-services + ${metainf-services.version} + true - net.java.dev.jna - jna-platform - ${java.dev.jna.version} + org.mockito + mockito-core + ${mockito.version} - org.glassfish.hk2.external - jakarta.inject - ${jakarta.inject.version} + org.mockito + mockito-inline + ${mockito.version} - jakarta.annotation - jakarta.annotation-api - ${jakarta.annotation.version} + org.mockito + mockito-junit-jupiter + ${mockito.version} - jakarta.validation - jakarta.validation-api - ${jakarta.validation.version} + org.reflections + reflections + ${reflections.version} + + + com.google.code.findbugs + jsr305 + + - jakarta.ws.rs - jakarta.ws.rs-api - ${jakarta.ws.rs-api.version} + org.rocksdb + rocksdbjni + ${rocksdb.version} - joda-time - joda-time - ${joda.time.version} + org.slf4j + jul-to-slf4j + ${slf4j.version} - org.apache.curator - curator-framework - ${curator.version} + org.slf4j + slf4j-api 
+ ${slf4j.version} - org.apache.curator - curator-client - ${curator.version} + org.slf4j + slf4j-reload4j + ${slf4j.version} - com.bettercloud - vault-java-driver - ${vault.driver.version} + org.springframework + spring-core + ${spring.version} + + + org.springframework + spring-jcl + + - org.jgrapht - jgrapht-core - ${jgrapht.version} + org.springframework + spring-jdbc + ${spring.version} - org.jgrapht - jgrapht-ext - ${jgrapht.version} + org.springframework + spring-tx + ${spring.version} - org.mockito - mockito-inline - ${mockito.version} + org.xerial + sqlite-jdbc + ${sqlite.version} org.xerial.snappy @@ -1297,33 +1212,33 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${snappy-java.version} - org.apache.hadoop.thirdparty - hadoop-shaded-guava - ${hadoop-shaded-guava.version} + org.yaml + snakeyaml + ${snakeyaml.version} - com.github.vlsi.mxgraph - jgraphx - ${jgraphx.version} + org.jacoco + org.jacoco.core + ${jacoco.version} + provided - com.nimbusds - nimbus-jose-jwt - ${com.nimbusds.nimbus-jose-jwt.version} + org.apache.hadoop + hadoop-mapreduce-client-jobclient + ${hadoop.version} + test - - - kr.motd.maven - os-maven-plugin - ${os-maven-plugin.version} - - + + com.github.ekryd.sortpom + sortpom-maven-plugin + ${sortpom-maven-plugin.version} + com.salesforce.servicelibs proto-backwards-compatibility @@ -1360,7 +1275,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${license-maven-plugin.version} false - ${project.basedir} + + ${project.basedir} + @@ -1415,7 +1332,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-javadoc-plugin ${maven-javadoc-plugin.version} - -Xdoclint:none + none + true @@ -1504,10 +1422,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs banned-rocksdb-imports - process-sources enforce + process-sources @@ -1531,12 +1449,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.rocksdb.TransactionLogIterator.BatchResult org.rocksdb.TickerType org.rocksdb.LiveFileMetaData - org.rocksdb.ColumnFamilyHandle org.rocksdb.Env org.rocksdb.Statistics - org.rocksdb.RocksDB.* @@ -1550,10 +1466,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ban-imports - process-sources enforce + process-sources @@ -1631,10 +1547,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ban-annotations - process-sources enforce + process-sources @@ -1691,6 +1607,38 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + org.apache.maven.plugins + maven-dependency-plugin + + + add-classpath-descriptor + + build-classpath + + prepare-package + + ${project.build.outputDirectory}/${project.artifactId}.classpath + $HDDS_LIB_JARS_DIR + true + runtime + ${classpath.skip} + + + + copy-jars + + copy-dependencies + + prepare-package + + ${project.build.directory}/share/ozone/lib + runtime + ${classpath.skip} + + + + maven-clean-plugin @@ -1708,25 +1656,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.maven.plugins maven-remote-resources-plugin ${maven-remote-resources-plugin.version} - - - org.apache.hadoop:hadoop-build-tools:${hadoop.version} - - - - - org.apache.hadoop - hadoop-build-tools - ${hadoop.version} - - - - - - process - - - org.apache.maven.plugins @@ -1735,14 +1664,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 
create-testdirs - validate run + validate - - + + @@ -1769,7 +1698,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs prepare-agent - org.apache.hadoop.hdds.*,org.apache.hadoop.ozone.*,org.apache.hadoop.fs.ozone.* + org.apache.hadoop.hdds.*,org.apache.hadoop.ozone.*,org.apache.hadoop.fs.ozone.*,org.apache.ozone.*,org.hadoop.ozone.* @@ -1788,27 +1717,22 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs false - ${project.build.directory}/log ${project.build.directory}/tmp - + true + ${java.security.egd} + ${project.build.directory}/test-classes/krb5.conf + ${require.test.libhadoop} + ${project.build.directory}/test-classes ${test.build.dir} ${test.build.data} ${test.build.webapps} ${test.cache.data} - ${project.build.directory}/test-classes - - true - ${project.build.directory}/test-classes/krb5.conf - ${java.security.egd} - ${require.test.libhadoop} - - junit.platform.output.capture.stdout = true - junit.platform.output.capture.stderr = true - + junit.platform.output.capture.stdout = true + junit.platform.output.capture.stderr = true **/Test*.java @@ -1824,27 +1748,27 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs false - - org.apache.maven.plugins - maven-checkstyle-plugin - ${maven-checkstyle-plugin.version} - - - com.puppycrawl.tools - checkstyle - ${checkstyle.version} - - - - hadoop-hdds/dev-support/checkstyle/checkstyle.xml - hadoop-hdds/dev-support/checkstyle/suppressions.xml - true - false - xml - html - ${project.build.directory}/test/checkstyle-errors.xml - - + + org.apache.maven.plugins + maven-checkstyle-plugin + ${maven-checkstyle-plugin.version} + + hadoop-hdds/dev-support/checkstyle/checkstyle.xml + hadoop-hdds/dev-support/checkstyle/suppressions.xml + true + false + xml + html + ${project.build.directory}/test/checkstyle-errors.xml + + + + com.puppycrawl.tools + checkstyle + ${checkstyle.version} + + + org.apache.maven.plugins maven-site-plugin @@ -1893,10 +1817,94 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs dev-support/rat/rat-exclusions.txt + + com.github.ekryd.sortpom + sortpom-maven-plugin + + false + ${project.build.sourceEncoding} + false + true + 2 + custom_1 + scope,groupId,artifactId + groupId,artifactId + true + true + true + Stop + Strict + + + + + verify + + validate + + + + + + kr.motd.maven + os-maven-plugin + ${os-maven-plugin.version} + + + + Apache Software Foundation + http://www.apache.org + + + + + Apache License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + + + + + + + false + never + + + false + never + + apache.snapshots + https://repository.apache.org/snapshots + + + + false + never + + + false + never + + apache.snapshots.https + https://repository.apache.org/content/repositories/snapshots + + + + + ${distMgmtStagingId} + ${distMgmtStagingName} + ${distMgmtStagingUrl} + + + ${distMgmtSnapshotsId} + ${distMgmtSnapshotsName} + ${distMgmtSnapshotsUrl} + + @@ -1910,10 +1918,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs module-javadocs - package jar + package @@ -1924,11 +1932,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-java-sources - package jar-no-fork test-jar-no-fork + package @@ -1938,10 +1946,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs dist-enforce - package enforce + package @@ -1950,18 +1958,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs cyclonedx-maven-plugin - package - makeAggregateBom + makeBom + package - - sign @@ -1973,10 +1979,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs sign-artifacts - verify sign + verify @@ -1992,8 +1998,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - ${user.home}/.clover.license - ${project.build.directory}/clover/hadoop-coverage.db true + ${project.build.directory}/clover/hadoop-coverage.db + false true true - false + ${user.home}/.clover.license @@ -2040,24 +2046,23 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs clover-setup - process-sources setup + process-sources clover - test clover + test - java8 @@ -2075,19 +2080,31 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs [9,] - ${javac.version} + + ${javac.version} - go-offline - void - true - true - true true true + true + true + true + void + + + + skip-frontend + + + skipRecon + + + + true + true