diff --git a/.config/format.yml b/.config/format.yml new file mode 100644 index 0000000000..a50e0a9971 --- /dev/null +++ b/.config/format.yml @@ -0,0 +1,7 @@ +formatter: + type: basic + indent: 2 + retain_line_breaks_single: true + +exclude: + - "deps/" diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 376c4171a6..3cd8df01dd 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -2,7 +2,6 @@ # package ecosystems to update and where the package manifests are located. # Please see the documentation for all configuration options: # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates ---- version: 2 updates: - package-ecosystem: github-actions diff --git a/.github/workflows/build-release-packages.yml b/.github/workflows/build-release-packages.yml index 8c32548f9e..094d82de08 100644 --- a/.github/workflows/build-release-packages.yml +++ b/.github/workflows/build-release-packages.yml @@ -3,7 +3,7 @@ name: Build Release Packages on: release: types: [published] - + workflow_dispatch: inputs: version: @@ -44,20 +44,20 @@ jobs: INPUT_VERSION: ${{ inputs.version || github.ref_name }} generate-build-matrix: - name: Generating build matrix - runs-on: ubuntu-latest - outputs: - x86_64-build-matrix: ${{ steps.set-matrix.outputs.x86_64-build-matrix }} - arm64-build-matrix: ${{ steps.set-matrix.outputs.arm64-build-matrix }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - # Set up the list of target to build so we can pass the JSON to the reusable job - - uses: ./.github/actions/generate-package-build-matrix - id: set-matrix - with: - ref: ${{ inputs.version || github.ref_name }} - + name: Generating build matrix + runs-on: ubuntu-latest + outputs: + x86_64-build-matrix: ${{ steps.set-matrix.outputs.x86_64-build-matrix }} + arm64-build-matrix: ${{ steps.set-matrix.outputs.arm64-build-matrix }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + # Set up the list of 
target to build so we can pass the JSON to the reusable job + - uses: ./.github/actions/generate-package-build-matrix + id: set-matrix + with: + ref: ${{ inputs.version || github.ref_name }} + release-build-linux-x86-packages: needs: - release-build-get-meta diff --git a/.github/workflows/call-build-linux-arm-packages.yml b/.github/workflows/call-build-linux-arm-packages.yml index db987b3816..80416806c9 100644 --- a/.github/workflows/call-build-linux-arm-packages.yml +++ b/.github/workflows/call-build-linux-arm-packages.yml @@ -45,7 +45,7 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ inputs.version }} - + - name: Make Valkey uses: uraimo/run-on-arch-action@v2 with: @@ -53,7 +53,7 @@ jobs: distro: ${{matrix.distro.target}} install: apt-get update && apt-get install -y build-essential libssl-dev run: make -C src all BUILD_TLS=yes - + - name: Create Tarball and SHA256sums run: | TAR_FILE_NAME=valkey-${{inputs.version}}-${{matrix.distro.platform}}-${{ matrix.distro.arch}} @@ -64,7 +64,7 @@ jobs: sha256sum $TAR_FILE_NAME.tar.gz > $TAR_FILE_NAME.tar.gz.sha256 mkdir -p packages-files cp -rfv $TAR_FILE_NAME.tar* packages-files/ - + - name: Install AWS cli. 
run: | sudo apt-get install -y awscli diff --git a/.github/workflows/call-build-linux-x86-packages.yml b/.github/workflows/call-build-linux-x86-packages.yml index 59c9fac52f..b037f9c507 100644 --- a/.github/workflows/call-build-linux-x86-packages.yml +++ b/.github/workflows/call-build-linux-x86-packages.yml @@ -51,7 +51,7 @@ jobs: - name: Make Valkey run: make -C src all BUILD_TLS=yes - + - name: Create Tarball and SHA256sums run: | TAR_FILE_NAME=valkey-${{inputs.version}}-${{matrix.distro.platform}}-${{ matrix.distro.arch}} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 508565d296..4ee0a59039 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,30 +6,29 @@ permissions: contents: read jobs: - test-ubuntu-latest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: make - # Fail build if there are warnings - # build with TLS just for compilation coverage - run: make all-with-unit-tests SERVER_CFLAGS='-Werror' BUILD_TLS=yes - - name: test - run: | - sudo apt-get install tcl8.6 tclx - ./runtest --verbose --tags -slow --dump-logs - - name: module api test - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs - - name: validate commands.def up to date - run: | - touch src/commands/ping.json - make commands.def - dirty=$(git diff) - if [[ ! 
-z $dirty ]]; then echo $dirty; exit 1; fi - - name: unit tests - run: | - ./src/valkey-unit-tests + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: make + # Fail build if there are warnings + # build with TLS just for compilation coverage + run: make all-with-unit-tests SERVER_CFLAGS='-Werror' BUILD_TLS=yes + - name: test + run: | + sudo apt-get install tcl8.6 tclx + ./runtest --verbose --tags -slow --dump-logs + - name: module api test + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs + - name: validate commands.def up to date + run: | + touch src/commands/ping.json + make commands.def + dirty=$(git diff) + if [[ ! -z $dirty ]]; then echo $dirty; exit 1; fi + - name: unit tests + run: | + ./src/valkey-unit-tests test-sanitizer-address: runs-on: ubuntu-latest @@ -49,43 +48,79 @@ jobs: runs-on: ubuntu-latest container: debian:buster steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: make - run: | - apt-get update && apt-get install -y build-essential - make SERVER_CFLAGS='-Werror' + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: make + run: | + apt-get update && apt-get install -y build-essential + make SERVER_CFLAGS='-Werror' build-macos-latest: runs-on: macos-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: make - run: make SERVER_CFLAGS='-Werror' + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: make + run: make SERVER_CFLAGS='-Werror' build-32bit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: make - run: | - sudo apt-get update && sudo apt-get install libc6-dev-i386 - make SERVER_CFLAGS='-Werror' 32bit + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: make + run: | + sudo apt-get update && sudo apt-get install libc6-dev-i386 + make 
SERVER_CFLAGS='-Werror' 32bit build-libc-malloc: runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: make - run: make SERVER_CFLAGS='-Werror' MALLOC=libc + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: make + run: make SERVER_CFLAGS='-Werror' MALLOC=libc build-almalinux8-jemalloc: runs-on: ubuntu-latest container: almalinux:8 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: make + run: | + dnf -y install epel-release gcc make procps-ng which + make -j SERVER_CFLAGS='-Werror' + + format-yaml: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Set up Go + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + with: + go-version: "1.22.4" + + - name: Setup YAML formatter + run: | + go install github.com/google/yamlfmt/cmd/yamlfmt@latest - - name: make - run: | - dnf -y install epel-release gcc make procps-ng which - make -j SERVER_CFLAGS='-Werror' + - name: Run yamlfmt + id: yamlfmt + run: | + yamlfmt -lint -conf .config/format.yml . + # Capture the diff output + DIFF=$(git diff) + if [ ! -z "$DIFF" ]; then + # Encode the diff in Base64 to ensure it's handled as a single line + ENCODED_DIFF=$(echo "$DIFF" | base64 -w 0) + echo "diff=$ENCODED_DIFF" >> $GITHUB_OUTPUT + fi + shell: bash + - name: Check for formatting changes + if: ${{ steps.yamlfmt.outputs.diff }} + run: | + echo "ERROR: YAML file is not formatted properly. 
Here is the diff: " + # Decode the Base64 diff to display it + echo "${{ steps.yamlfmt.outputs.diff }}" | base64 --decode + exit 1 + shell: bash diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml index b851ffe926..7d53cc28e7 100644 --- a/.github/workflows/clang-format.yml +++ b/.github/workflows/clang-format.yml @@ -10,39 +10,39 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout code - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Set up Clang - run: | - sudo apt-get update -y - sudo apt-get upgrade -y - sudo apt-get install software-properties-common -y - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | gpg --dearmor | sudo tee /usr/share/keyrings/llvm-toolchain.gpg > /dev/null - echo "deb [signed-by=/usr/share/keyrings/llvm-toolchain.gpg] http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-18 main" | sudo tee /etc/apt/sources.list.d/llvm.list - sudo apt-get update -y - sudo apt-get install clang-format-18 -y - - name: Run clang-format - id: clang-format - run: | - # Run clang-format and capture the diff - cd src - shopt -s globstar - clang-format-18 -i **/*.c **/*.h - # Capture the diff output - DIFF=$(git diff) - if [ ! 
-z "$DIFF" ]; then - # Encode the diff in Base64 to ensure it's handled as a single line - ENCODED_DIFF=$(echo "$DIFF" | base64 -w 0) - echo "diff=$ENCODED_DIFF" >> $GITHUB_OUTPUT - fi - shell: bash + - name: Set up Clang + run: | + sudo apt-get update -y + sudo apt-get upgrade -y + sudo apt-get install software-properties-common -y + wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | gpg --dearmor | sudo tee /usr/share/keyrings/llvm-toolchain.gpg > /dev/null + echo "deb [signed-by=/usr/share/keyrings/llvm-toolchain.gpg] http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-18 main" | sudo tee /etc/apt/sources.list.d/llvm.list + sudo apt-get update -y + sudo apt-get install clang-format-18 -y + - name: Run clang-format + id: clang-format + run: | + # Run clang-format and capture the diff + cd src + shopt -s globstar + clang-format-18 -i **/*.c **/*.h + # Capture the diff output + DIFF=$(git diff) + if [ ! -z "$DIFF" ]; then + # Encode the diff in Base64 to ensure it's handled as a single line + ENCODED_DIFF=$(echo "$DIFF" | base64 -w 0) + echo "diff=$ENCODED_DIFF" >> $GITHUB_OUTPUT + fi + shell: bash - - name: Check for formatting changes - if: ${{ steps.clang-format.outputs.diff }} - run: | - echo "Code is not formatted correctly. Here is the diff:" - # Decode the Base64 diff to display it - echo "${{ steps.clang-format.outputs.diff }}" | base64 --decode - exit 1 - shell: bash + - name: Check for formatting changes + if: ${{ steps.clang-format.outputs.diff }} + run: | + echo "ERROR: Code is not formatted correctly. 
Here is the diff:" + # Decode the Base64 diff to display it + echo "${{ steps.clang-format.outputs.diff }}" | base64 --decode + exit 1 + shell: bash diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index e1493db19b..0a0d4a23db 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -9,16 +9,16 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v4 + - name: Checkout repository + uses: actions/checkout@v4 - - name: Install lcov and run test - run: | - sudo apt-get install lcov - make lcov + - name: Install lcov and run test + run: | + sudo apt-get install lcov + make lcov - - name: Upload code coverage - uses: codecov/codecov-action@v4 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: ./src/valkey.info \ No newline at end of file + - name: Upload code coverage + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: ./src/valkey.info diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d4db9bcc29..6165429c3a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -20,19 +20,19 @@ jobs: strategy: fail-fast: false matrix: - language: [ 'cpp' ] + language: ['cpp'] steps: - - name: Checkout repository - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Initialize CodeQL - uses: github/codeql-action/init@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 - with: - languages: ${{ matrix.language }} + - name: Initialize CodeQL + uses: github/codeql-action/init@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 + with: + languages: ${{ matrix.language }} - - name: Autobuild - uses: github/codeql-action/autobuild@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 + - name: Autobuild + uses: 
github/codeql-action/autobuild@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index 47eb27276e..cfd0df1af3 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -2,8 +2,8 @@ name: Coverity Scan on: schedule: - # Run once daily, since below 500k LOC can have 21 builds per week, per https://scan.coverity.com/faq#frequency - - cron: '0 0 * * *' + # Run once daily, since below 500k LOC can have 21 builds per week, per https://scan.coverity.com/faq#frequency + - cron: '0 0 * * *' # Support manual execution workflow_dispatch: permissions: @@ -13,18 +13,18 @@ jobs: if: github.repository == 'valkey-io/valkey' runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Download and extract the Coverity Build Tool - run: | + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Download and extract the Coverity Build Tool + run: | wget -q https://scan.coverity.com/download/cxx/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=valkey-io%2Fvalkey" -O cov-analysis-linux64.tar.gz mkdir cov-analysis-linux64 tar xzf cov-analysis-linux64.tar.gz --strip 1 -C cov-analysis-linux64 - - name: Install Valkey dependencies - run: sudo apt install -y gcc procps libssl-dev - - name: Build with cov-build - run: cov-analysis-linux64/bin/cov-build --dir cov-int make - - name: Upload the result - run: | + - name: Install Valkey dependencies + run: sudo apt install -y gcc procps libssl-dev + - name: Build with cov-build + run: cov-analysis-linux64/bin/cov-build --dir cov-int make + - name: Upload the result + run: | tar czvf cov-int.tgz 
cov-int curl \ --form email=${{ secrets.COVERITY_SCAN_EMAIL }} \ diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index ff7a9ad67b..7679856d1d 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -4,35 +4,34 @@ on: pull_request: branches: # any PR to a release branch. - - '[0-9].[0-9]' + - "[0-9].[0-9]" schedule: - - cron: '0 0 * * *' + - cron: "0 0 * * *" workflow_dispatch: inputs: skipjobs: - description: 'jobs to skip (delete the ones you wanna keep, do not leave empty)' - default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,rpm-distros,malloc,specific,fortify,reply-schema' + description: "jobs to skip (delete the ones you wanna keep, do not leave empty)" + default: "valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,rpm-distros,malloc,specific,fortify,reply-schema" skiptests: - description: 'tests to skip (delete the ones you wanna keep, do not leave empty)' - default: 'valkey,modules,sentinel,cluster,unittest' + description: "tests to skip (delete the ones you wanna keep, do not leave empty)" + default: "valkey,modules,sentinel,cluster,unittest" test_args: - description: 'extra test arguments' - default: '' + description: "extra test arguments" + default: "" cluster_test_args: - description: 'extra cluster / sentinel test arguments' - default: '' + description: "extra cluster / sentinel test arguments" + default: "" use_repo: - description: 'repo owner and name' - default: 'valkey-io/valkey' + description: "repo owner and name" + default: "valkey-io/valkey" use_git_ref: - description: 'git branch or sha to use' - default: 'unstable' + description: "git branch or sha to use" + default: "unstable" permissions: contents: read jobs: - test-ubuntu-jemalloc: runs-on: ubuntu-latest if: | @@ -40,41 +39,41 @@ jobs: !contains(github.event.inputs.skipjobs, 'ubuntu') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo 
"GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make all-with-unit-tests SERVER_CFLAGS='-Werror -DSERVER_TEST' - - name: testprep - run: sudo apt-get install tcl8.6 tclx - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - - name: legacy unit tests - if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: ./src/valkey-server test all --accurate - - name: new unit tests - if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: ./src/valkey-unit-tests --accurate + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: 
${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make all-with-unit-tests SERVER_CFLAGS='-Werror -DSERVER_TEST' + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: legacy unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-server test all --accurate + - name: new unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-unit-tests --accurate test-ubuntu-jemalloc-fortify: runs-on: ubuntu-latest @@ -84,44 +83,44 @@ jobs: container: ubuntu:lunar timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: | - apt-get update && apt-get install -y make gcc-13 - update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100 - make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -DSERVER_TEST -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3' - - name: testprep - run: apt-get install -y tcl8.6 tclx procps - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - - name: legacy unit tests - if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: ./src/valkey-server test all --accurate - - name: new unit tests - if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: ./src/valkey-unit-tests --accurate + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ 
env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: | + apt-get update && apt-get install -y make gcc-13 + update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100 + make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -DSERVER_TEST -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3' + - name: testprep + run: apt-get install -y tcl8.6 tclx procps + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: legacy unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-server test all --accurate + - name: new unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-unit-tests --accurate test-ubuntu-libc-malloc: runs-on: ubuntu-latest @@ -130,35 +129,35 @@ jobs: !contains(github.event.inputs.skipjobs, 'malloc') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make MALLOC=libc SERVER_CFLAGS='-Werror' - - name: testprep - run: sudo apt-get install tcl8.6 tclx - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make MALLOC=libc SERVER_CFLAGS='-Werror' + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi 
--verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} test-ubuntu-no-malloc-usable-size: runs-on: ubuntu-latest @@ -167,35 +166,35 @@ jobs: !contains(github.event.inputs.skipjobs, 'malloc') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make MALLOC=libc CFLAGS=-DNO_MALLOC_USABLE_SIZE SERVER_CFLAGS='-Werror' - - name: testprep - run: sudo apt-get install tcl8.6 tclx - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - 
name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make MALLOC=libc CFLAGS=-DNO_MALLOC_USABLE_SIZE SERVER_CFLAGS='-Werror' + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} test-ubuntu-32bit: runs-on: ubuntu-latest @@ -204,45 +203,45 @@ jobs: !contains(github.event.inputs.skipjobs, '32bit') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo 
"cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: | - sudo apt-get update && sudo apt-get install libc6-dev-i386 - make 32bit SERVER_CFLAGS='-Werror -DSERVER_TEST' - - name: testprep - run: sudo apt-get install tcl8.6 tclx - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: | - make -C tests/modules 32bit # the script below doesn't have an argument, we must build manually ahead of time - CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - - name: legacy unit tests - if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: ./src/valkey-server test all --accurate - - name: new unit tests - if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: ./src/valkey-unit-tests --accurate + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: | + sudo apt-get update && sudo apt-get install libc6-dev-i386 + make 32bit SERVER_CFLAGS='-Werror -DSERVER_TEST' + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + make -C tests/modules 32bit # the script below doesn't have an argument, we must build manually ahead of time + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: legacy unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-server test all --accurate + - name: new unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-unit-tests --accurate test-ubuntu-tls: runs-on: ubuntu-latest @@ -251,42 +250,42 @@ jobs: !contains(github.event.inputs.skipjobs, 'tls') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: 
${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: | - make BUILD_TLS=yes SERVER_CFLAGS='-Werror' - - name: testprep - run: | - sudo apt-get install tcl8.6 tclx tcl-tls - ./utils/gen-test-certs.sh - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: | - ./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: | - CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: | - ./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: | - ./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: | + make BUILD_TLS=yes SERVER_CFLAGS='-Werror' + - name: testprep + run: | + sudo apt-get install tcl8.6 tclx tcl-tls + ./utils/gen-test-certs.sh + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: | + 
./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: | + ./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}} test-ubuntu-tls-no-tls: runs-on: ubuntu-latest @@ -295,42 +294,42 @@ jobs: !contains(github.event.inputs.skipjobs, 'tls') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: | - make BUILD_TLS=yes SERVER_CFLAGS='-Werror' - - name: testprep - run: | - sudo apt-get install tcl8.6 tclx tcl-tls - ./utils/gen-test-certs.sh - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: | - ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: | - CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && 
!contains(github.event.inputs.skiptests, 'sentinel') - run: | - ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: | - ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: | + make BUILD_TLS=yes SERVER_CFLAGS='-Werror' + - name: testprep + run: | + sudo apt-get install tcl8.6 tclx tcl-tls + ./utils/gen-test-certs.sh + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: | + ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: | + ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster ${{github.event.inputs.cluster_test_args}} test-ubuntu-io-threads: runs-on: ubuntu-latest @@ -339,30 +338,30 @@ jobs: !contains(github.event.inputs.skipjobs, 'iothreads') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - 
echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: | - make SERVER_CFLAGS='-Werror' - - name: testprep - run: sudo apt-get install tcl8.6 tclx - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --config io-threads 4 --config io-threads-do-reads yes --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster --config io-threads 4 --config io-threads-do-reads yes ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: | + make SERVER_CFLAGS='-Werror' + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --config io-threads 4 --config io-threads-do-reads yes 
--accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster --config io-threads 4 --config io-threads-do-reads yes ${{github.event.inputs.cluster_test_args}} test-ubuntu-reclaim-cache: runs-on: ubuntu-latest @@ -371,76 +370,72 @@ jobs: !contains(github.event.inputs.skipjobs, 'specific') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: | - make SERVER_CFLAGS='-Werror' - - name: testprep - run: | - sudo apt-get install vmtouch - mkdir /tmp/master - mkdir /tmp/slave - - name: warm up - run: | - ./src/valkey-server --daemonize yes --logfile /dev/null - ./src/valkey-benchmark -n 1 > /dev/null - ./src/valkey-cli save | grep OK > /dev/null - vmtouch -v ./dump.rdb > /dev/null - - name: test - run: | - echo "test SAVE doesn't increase cache" - CACHE0=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') - echo "$CACHE0" - ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/master --port 8080 --repl-diskless-sync no --pidfile /tmp/master/valkey.pid --rdbcompression no --enable-debug-command yes - ./src/valkey-cli -p 8080 debug populate 10000 k 102400 - ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/slave --port 8081 --repl-diskless-load disabled --rdbcompression no - ./src/valkey-cli -p 
8080 save > /dev/null - VMOUT=$(vmtouch -v /tmp/master/dump.rdb) - echo $VMOUT - grep -q " 0%" <<< $VMOUT - CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') - echo "$CACHE" - if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: | + make SERVER_CFLAGS='-Werror' + - name: testprep + run: "sudo apt-get install vmtouch\nmkdir /tmp/master \nmkdir /tmp/slave\n" + - name: warm up + run: | + ./src/valkey-server --daemonize yes --logfile /dev/null + ./src/valkey-benchmark -n 1 > /dev/null + ./src/valkey-cli save | grep OK > /dev/null + vmtouch -v ./dump.rdb > /dev/null + - name: test + run: | + echo "test SAVE doesn't increase cache" + CACHE0=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') + echo "$CACHE0" + ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/master --port 8080 --repl-diskless-sync no --pidfile /tmp/master/valkey.pid --rdbcompression no --enable-debug-command yes + ./src/valkey-cli -p 8080 debug populate 10000 k 102400 + ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/slave --port 8081 --repl-diskless-load disabled --rdbcompression no + ./src/valkey-cli -p 8080 save > /dev/null + VMOUT=$(vmtouch -v /tmp/master/dump.rdb) + echo $VMOUT + grep -q " 0%" <<< $VMOUT + CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') + echo "$CACHE" + if [ 
"$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi + echo "test replication doesn't increase cache" + ./src/valkey-cli -p 8081 REPLICAOF 127.0.0.1 8080 > /dev/null + while [ $(./src/valkey-cli -p 8081 info replication | grep "master_link_status:down") ]; do sleep 1; done; + sleep 1 # wait for the completion of cache reclaim bio + VMOUT=$(vmtouch -v /tmp/master/dump.rdb) + echo $VMOUT + grep -q " 0%" <<< $VMOUT + VMOUT=$(vmtouch -v /tmp/slave/dump.rdb) + echo $VMOUT + grep -q " 0%" <<< $VMOUT + CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') + echo "$CACHE" + if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi - echo "test replication doesn't increase cache" - ./src/valkey-cli -p 8081 REPLICAOF 127.0.0.1 8080 > /dev/null - while [ $(./src/valkey-cli -p 8081 info replication | grep "master_link_status:down") ]; do sleep 1; done; - sleep 1 # wait for the completion of cache reclaim bio - VMOUT=$(vmtouch -v /tmp/master/dump.rdb) - echo $VMOUT - grep -q " 0%" <<< $VMOUT - VMOUT=$(vmtouch -v /tmp/slave/dump.rdb) - echo $VMOUT - grep -q " 0%" <<< $VMOUT - CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') - echo "$CACHE" - if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi - - echo "test reboot doesn't increase cache" - PID=$(cat /tmp/master/valkey.pid) - kill -15 $PID - while [ -x /proc/${PID} ]; do sleep 1; done - ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/master --port 8080 - while [ $(./src/valkey-cli -p 8080 info persistence | grep "loading:1") ]; do sleep 1; done; - sleep 1 # wait for the completion of cache reclaim bio - VMOUT=$(vmtouch -v /tmp/master/dump.rdb) - echo $VMOUT - grep -q " 0%" <<< $VMOUT - CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') - echo "$CACHE" - if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi + echo "test reboot doesn't increase cache" + PID=$(cat /tmp/master/valkey.pid) + kill -15 $PID + while [ -x /proc/${PID} ]; do 
sleep 1; done + ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/master --port 8080 + while [ $(./src/valkey-cli -p 8080 info persistence | grep "loading:1") ]; do sleep 1; done; + sleep 1 # wait for the completion of cache reclaim bio + VMOUT=$(vmtouch -v /tmp/master/dump.rdb) + echo $VMOUT + grep -q " 0%" <<< $VMOUT + CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') + echo "$CACHE" + if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi test-valgrind-test: runs-on: ubuntu-latest @@ -449,28 +444,28 @@ jobs: !contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'valkey') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make valgrind SERVER_CFLAGS='-Werror -DSERVER_TEST' - - name: testprep - run: | - sudo apt-get update - sudo apt-get install tcl8.6 tclx valgrind -y - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: 
${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make valgrind SERVER_CFLAGS='-Werror -DSERVER_TEST' + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx valgrind -y + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} test-valgrind-misc: runs-on: ubuntu-latest @@ -479,33 +474,33 @@ jobs: !contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest')) timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make valgrind SERVER_CFLAGS='-Werror -DSERVER_TEST' - - name: testprep - run: | - sudo apt-get update - sudo apt-get install tcl8.6 tclx valgrind -y - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} 
- - name: unittest - if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: | - valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-server test all --valgrind - if grep -q 0x err.txt; then cat err.txt; exit 1; fi + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make valgrind SERVER_CFLAGS='-Werror -DSERVER_TEST' + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx valgrind -y + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-server test all --valgrind + if grep -q 0x err.txt; then cat err.txt; exit 1; fi test-valgrind-no-malloc-usable-size-test: runs-on: ubuntu-latest @@ -514,28 +509,28 @@ jobs: !contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'valkey') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - 
run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DSERVER_TEST" SERVER_CFLAGS='-Werror' - - name: testprep - run: | - sudo apt-get update - sudo apt-get install tcl8.6 tclx valgrind -y - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DSERVER_TEST" SERVER_CFLAGS='-Werror' + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx valgrind -y + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} 
test-valgrind-no-malloc-usable-size-misc: runs-on: ubuntu-latest @@ -544,33 +539,33 @@ jobs: !contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest')) timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DSERVER_TEST" SERVER_CFLAGS='-Werror' - - name: testprep - run: | - sudo apt-get update - sudo apt-get install tcl8.6 tclx valgrind -y - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} - - name: unittest - if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: | - valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-server test all --valgrind - if grep -q 0x err.txt; then cat err.txt; exit 1; fi + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: 
${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE -DSERVER_TEST" SERVER_CFLAGS='-Werror' + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx valgrind -y + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-server test all --valgrind + if grep -q 0x err.txt; then cat err.txt; exit 1; fi test-sanitizer-address: runs-on: ubuntu-latest @@ -580,7 +575,7 @@ jobs: timeout-minutes: 14400 strategy: matrix: - compiler: [ gcc, clang ] + compiler: [gcc, clang] env: CC: ${{ matrix.compiler }} steps: @@ -630,7 +625,7 @@ jobs: timeout-minutes: 14400 strategy: matrix: - compiler: [ gcc, clang ] + compiler: [gcc, clang] env: CC: ${{ matrix.compiler }} steps: @@ -674,8 +669,8 @@ jobs: test-rpm-distros-jemalloc: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'rpm-distros') + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + !contains(github.event.inputs.skipjobs, 'rpm-distros') strategy: fail-fast: false matrix: @@ -701,45 +696,45 @@ jobs: timeout-minutes: 
14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: Install EPEL - if: matrix.install_epel - run: dnf -y install epel-release - - name: make - run: | - dnf -y install gcc make procps-ng which /usr/bin/kill - make -j SERVER_CFLAGS='-Werror' - - name: testprep - run: dnf -y install tcl tcltls - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: 
${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release + - name: make + run: | + dnf -y install gcc make procps-ng which /usr/bin/kill + make -j SERVER_CFLAGS='-Werror' + - name: testprep + run: dnf -y install tcl tcltls + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} test-rpm-distros-tls-module: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'tls') + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + !contains(github.event.inputs.skipjobs, 'tls') strategy: fail-fast: false matrix: @@ -765,51 +760,51 @@ jobs: timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: 
${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: Install EPEL - if: matrix.install_epel - run: dnf -y install epel-release - - name: make - run: | - dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill - make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' - - name: testprep - run: | - dnf -y install tcl tcltls - ./utils/gen-test-certs.sh - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: | - ./runtest --accurate --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: | - CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: | - ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: | - ./runtest-cluster --tls-module ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: 
matrix.install_epel + run: dnf -y install epel-release + - name: make + run: | + dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill + make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' + - name: testprep + run: | + dnf -y install tcl tcltls + ./utils/gen-test-certs.sh + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: | + ./runtest --accurate --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: | + ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster --tls-module ${{github.event.inputs.cluster_test_args}} test-rpm-distros-tls-module-no-tls: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'tls') + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + !contains(github.event.inputs.skipjobs, 'tls') strategy: fail-fast: false matrix: @@ -835,46 +830,46 @@ jobs: timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: 
${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: Install EPEL - if: matrix.install_epel - run: dnf -y install epel-release - - name: make - run: | - dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill - make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' - - name: testprep - run: | - dnf -y install tcl tcltls - ./utils/gen-test-certs.sh - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: | - ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: | - CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: | - ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: | - ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release + - name: make + run: | + dnf -y install make gcc openssl-devel openssl 
procps-ng which /usr/bin/kill + make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' + - name: testprep + run: | + dnf -y install tcl tcltls + ./utils/gen-test-certs.sh + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: | + ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: | + ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster ${{github.event.inputs.cluster_test_args}} test-macos-latest: runs-on: macos-latest @@ -883,27 +878,27 @@ jobs: !contains(github.event.inputs.skipjobs, 'macos') && !(contains(github.event.inputs.skiptests, 'valkey') && contains(github.event.inputs.skiptests, 'modules')) timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make SERVER_CFLAGS='-Werror' - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} - - name: module api 
test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make SERVER_CFLAGS='-Werror' + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} test-macos-latest-sentinel: runs-on: macos-latest @@ -912,24 +907,24 @@ jobs: !contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'sentinel') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make SERVER_CFLAGS='-Werror' - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make SERVER_CFLAGS='-Werror' + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} test-macos-latest-cluster: runs-on: macos-latest @@ -938,24 +933,24 @@ jobs: !contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'cluster') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: 
${{ env.GITHUB_HEAD_REF }} - - name: make - run: make SERVER_CFLAGS='-Werror' - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make SERVER_CFLAGS='-Werror' + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} build-macos: strategy: @@ -967,24 +962,24 @@ jobs: !contains(github.event.inputs.skipjobs, 'macos') timeout-minutes: 14400 steps: - - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 - with: - xcode-version: latest - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make SERVER_CFLAGS='-Werror -DSERVER_TEST' + 
- uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 + with: + xcode-version: latest + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make SERVER_CFLAGS='-Werror -DSERVER_TEST' test-freebsd: runs-on: macos-12 @@ -993,26 +988,26 @@ jobs: !contains(github.event.inputs.skipjobs, 'freebsd') timeout-minutes: 14400 steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: test - uses: cross-platform-actions/action@5800fa0060a22edf69992a779adac3d2bb3a6f8a # v0.22.0 - with: - operating_system: freebsd - environment_variables: MAKE - version: 13.2 - shell: bash - run: | - sudo pkg install -y bash gmake lang/tcl86 lang/tclx - gmake - ./runtest --single unit/keyspace --single unit/auth --single unit/networking --single unit/protocol + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 
v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: test + uses: cross-platform-actions/action@5800fa0060a22edf69992a779adac3d2bb3a6f8a # v0.22.0 + with: + operating_system: freebsd + environment_variables: MAKE + version: 13.2 + shell: bash + run: | + sudo pkg install -y bash gmake lang/tcl86 lang/tclx + gmake + ./runtest --single unit/keyspace --single unit/auth --single unit/networking --single unit/protocol test-alpine-jemalloc: runs-on: ubuntu-latest @@ -1021,37 +1016,37 @@ jobs: !contains(github.event.inputs.skipjobs, 'alpine') container: alpine:latest steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: | + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: | apk add build-base make SERVER_CFLAGS='-Werror' - - name: testprep - run: apk add tcl 
procps tclx - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: testprep + run: apk add tcl procps tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} test-alpine-libc-malloc: runs-on: ubuntu-latest @@ -1060,37 +1055,37 @@ jobs: !contains(github.event.inputs.skipjobs, 'alpine') container: alpine:latest steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: 
${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: | + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: | apk add build-base make SERVER_CFLAGS='-Werror' USE_JEMALLOC=no CFLAGS=-DUSE_MALLOC_USABLE_SIZE - - name: testprep - run: apk add tcl procps tclx - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && !contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: testprep + run: apk add tcl procps tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 
'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} reply-schemas-validator: runs-on: ubuntu-latest @@ -1099,41 +1094,41 @@ jobs: (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && !contains(github.event.inputs.skipjobs, 'reply-schema') steps: - - name: prep - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV - echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - echo "skipjobs: ${{github.event.inputs.skipjobs}}" - echo "skiptests: ${{github.event.inputs.skiptests}}" - echo "test_args: ${{github.event.inputs.test_args}}" - echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ env.GITHUB_REPOSITORY }} - ref: ${{ env.GITHUB_HEAD_REF }} - - name: make - run: make SERVER_CFLAGS='-Werror -DLOG_REQ_RES' - - name: testprep - run: sudo apt-get install tcl8.6 tclx - - name: test - if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --log-req-res --no-latency --dont-clean --force-resp3 --tags -slow --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: module api test - if: true && !contains(github.event.inputs.skiptests, 'modules') - run: CFLAGS='-Werror' ./runtest-moduleapi --log-req-res --no-latency --dont-clean --force-resp3 --dont-pre-clean --verbose --dump-logs ${{github.event.inputs.test_args}} - - name: sentinel tests - if: true && 
!contains(github.event.inputs.skiptests, 'sentinel') - run: ./runtest-sentinel --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}} - - name: cluster tests - if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}} - - name: Install Python dependencies - uses: py-actions/py-dependency-install@30aa0023464ed4b5b116bd9fbdab87acf01a484e # v4.1.0 - with: - path: "./utils/req-res-validator/requirements.txt" - - name: validator - run: ./utils/req-res-log-validator.py --verbose --fail-missing-reply-schemas ${{ (!contains(github.event.inputs.skiptests, 'valkey') && !contains(github.event.inputs.skiptests, 'module') && !contains(github.event.inputs.sentinel, 'valkey') && !contains(github.event.inputs.skiptests, 'cluster')) && github.event.inputs.test_args == '' && github.event.inputs.cluster_test_args == '' && '--fail-commands-not-all-hit' || '' }} + - name: prep + if: github.event_name == 'workflow_dispatch' + run: | + echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + - name: make + run: make SERVER_CFLAGS='-Werror -DLOG_REQ_RES' + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --log-req-res --no-latency --dont-clean --force-resp3 --tags -slow --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true 
&& !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --log-req-res --no-latency --dont-clean --force-resp3 --dont-pre-clean --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}} + - name: Install Python dependencies + uses: py-actions/py-dependency-install@30aa0023464ed4b5b116bd9fbdab87acf01a484e # v4.1.0 + with: + path: "./utils/req-res-validator/requirements.txt" + - name: validator + run: ./utils/req-res-log-validator.py --verbose --fail-missing-reply-schemas ${{ (!contains(github.event.inputs.skiptests, 'valkey') && !contains(github.event.inputs.skiptests, 'module') && !contains(github.event.inputs.sentinel, 'valkey') && !contains(github.event.inputs.skiptests, 'cluster')) && github.event.inputs.test_args == '' && github.event.inputs.cluster_test_args == '' && '--fail-commands-not-all-hit' || '' }} notify-about-job-results: runs-on: ubuntu-latest @@ -1151,7 +1146,7 @@ jobs: FAILED_JOBS+=($JOB) fi done - + if [[ ${#FAILED_JOBS[@]} -ne 0 ]]; then echo "FAILED_JOBS=${FAILED_JOBS[@]}" >> $GITHUB_ENV echo "STATUS=failure" >> $GITHUB_ENV diff --git a/.github/workflows/external.yml b/.github/workflows/external.yml index 8946526c8d..e021a964d1 100644 --- a/.github/workflows/external.yml +++ b/.github/workflows/external.yml @@ -15,53 +15,53 @@ jobs: if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey' timeout-minutes: 14400 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Build - run: make SERVER_CFLAGS=-Werror - - name: Start valkey-server - run: | - 
./src/valkey-server --daemonize yes --save "" --logfile external-server.log \ - --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes - - name: Run external test - run: | + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Build + run: make SERVER_CFLAGS=-Werror + - name: Start valkey-server + run: | + ./src/valkey-server --daemonize yes --save "" --logfile external-server.log \ + --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes + - name: Run external test + run: | ./runtest \ --host 127.0.0.1 --port 6379 \ --verbose \ --tags -slow - - name: Archive server log - if: ${{ failure() }} - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 - with: - name: test-external-standalone-log - path: external-server.log + - name: Archive server log + if: ${{ failure() }} + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: test-external-standalone-log + path: external-server.log test-external-cluster: runs-on: ubuntu-latest if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey' timeout-minutes: 14400 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Build - run: make SERVER_CFLAGS=-Werror - - name: Start valkey-server - run: | - ./src/valkey-server --cluster-enabled yes --daemonize yes --save "" --logfile external-server.log \ - --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes - - name: Create a single node cluster - run: ./src/valkey-cli cluster addslots $(for slot in {0..16383}; do echo $slot; done); sleep 5 - - name: Run external test - run: | + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Build + run: make SERVER_CFLAGS=-Werror + - name: Start valkey-server + run: | + ./src/valkey-server --cluster-enabled yes --daemonize yes --save "" --logfile 
external-server.log \ + --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes + - name: Create a single node cluster + run: ./src/valkey-cli cluster addslots $(for slot in {0..16383}; do echo $slot; done); sleep 5 + - name: Run external test + run: | ./runtest \ --host 127.0.0.1 --port 6379 \ --verbose \ --cluster-mode \ --tags -slow - - name: Archive server log - if: ${{ failure() }} - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 - with: - name: test-external-cluster-log - path: external-server.log + - name: Archive server log + if: ${{ failure() }} + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: test-external-cluster-log + path: external-server.log test-external-nodebug: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index c660bc3600..a1b72a462e 100644 --- a/.gitignore +++ b/.gitignore @@ -45,4 +45,5 @@ redis.code-workspace .cache .cscope* .swp +nodes.conf tests/cluster/tmp/* diff --git a/COPYING b/COPYING index 10928babb3..2058f57e56 100644 --- a/COPYING +++ b/COPYING @@ -1,5 +1,7 @@ # License 1 +BSD 3-Clause License + Copyright (c) 2024-present, Valkey contributors All rights reserved. @@ -13,6 +15,8 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # License 2 +BSD 3-Clause License + Copyright (c) 2006-2020, Salvatore Sanfilippo All rights reserved. 
diff --git a/codecov.yml b/codecov.yml index c8d096e44f..05e315dcc3 100644 --- a/codecov.yml +++ b/codecov.yml @@ -16,4 +16,4 @@ comment: behavior: default github_checks: - annotations: false \ No newline at end of file + annotations: false diff --git a/src/Makefile b/src/Makefile index 6defebed8d..302ad06b84 100644 --- a/src/Makefile +++ b/src/Makefile @@ -150,6 +150,11 @@ DEBUG=-g -ggdb # Linux ARM32 needs -latomic at linking time ifneq (,$(findstring armv,$(uname_M))) FINAL_LIBS+=-latomic +else +# Linux POWER needs -latomic at linking time +ifneq (,$(findstring ppc,$(uname_M))) + FINAL_LIBS+=-latomic +endif endif ifeq ($(uname_S),SunOS) diff --git a/src/acl.c b/src/acl.c index 0c3ccb7f6d..bda449e8d2 100644 --- a/src/acl.c +++ b/src/acl.c @@ -506,7 +506,7 @@ void ACLFreeUserAndKillClients(user *u) { * more defensive to set the default user and put * it in non authenticated mode. */ c->user = DefaultUser; - c->authenticated = 0; + c->flags &= ~CLIENT_AUTHENTICATED; /* We will write replies to this client later, so we can't * close it directly even if async. */ if (c == server.current_client) { @@ -1494,7 +1494,7 @@ void addAuthErrReply(client *c, robj *err) { * The return value is AUTH_OK on success (valid username / password pair) & AUTH_ERR otherwise. 
*/ int checkPasswordBasedAuth(client *c, robj *username, robj *password) { if (ACLCheckUserCredentials(username, password) == C_OK) { - c->authenticated = 1; + c->flags |= CLIENT_AUTHENTICATED; c->user = ACLGetUserByName(username->ptr, sdslen(username->ptr)); moduleNotifyUserChanged(c); return AUTH_OK; @@ -1587,12 +1587,10 @@ static int ACLSelectorCheckKey(aclSelector *selector, const char *key, int keyle listRewind(selector->patterns, &li); int key_flags = 0; - /* clang-format off */ if (keyspec_flags & CMD_KEY_ACCESS) key_flags |= ACL_READ_PERMISSION; if (keyspec_flags & CMD_KEY_INSERT) key_flags |= ACL_WRITE_PERMISSION; if (keyspec_flags & CMD_KEY_DELETE) key_flags |= ACL_WRITE_PERMISSION; if (keyspec_flags & CMD_KEY_UPDATE) key_flags |= ACL_WRITE_PERMISSION; - /* clang-format on */ /* Test this key against every pattern. */ while ((ln = listNext(&li))) { @@ -1618,12 +1616,10 @@ static int ACLSelectorHasUnrestrictedKeyAccess(aclSelector *selector, int flags) listRewind(selector->patterns, &li); int access_flags = 0; - /* clang-format off */ if (flags & CMD_KEY_ACCESS) access_flags |= ACL_READ_PERMISSION; if (flags & CMD_KEY_INSERT) access_flags |= ACL_WRITE_PERMISSION; if (flags & CMD_KEY_DELETE) access_flags |= ACL_WRITE_PERMISSION; if (flags & CMD_KEY_UPDATE) access_flags |= ACL_WRITE_PERMISSION; - /* clang-format on */ /* Test this key against every pattern. */ while ((ln = listNext(&li))) { @@ -1717,7 +1713,7 @@ static int ACLSelectorCheckCmd(aclSelector *selector, * mentioned in the command arguments. 
*/ if (!(selector->flags & SELECTOR_FLAG_ALLKEYS) && doesCommandHaveKeys(cmd)) { if (!(cache->keys_init)) { - cache->keys = (getKeysResult)GETKEYS_RESULT_INIT; + initGetKeysResult(&(cache->keys)); getKeysFromCommandWithSpecs(cmd, argv, argc, GET_KEYSPEC_DEFAULT, &(cache->keys)); cache->keys_init = 1; } @@ -1737,7 +1733,8 @@ static int ACLSelectorCheckCmd(aclSelector *selector, * mentioned in the command arguments */ const int channel_flags = CMD_CHANNEL_PUBLISH | CMD_CHANNEL_SUBSCRIBE; if (!(selector->flags & SELECTOR_FLAG_ALLCHANNELS) && doesCommandHaveChannelsWithFlags(cmd, channel_flags)) { - getKeysResult channels = (getKeysResult)GETKEYS_RESULT_INIT; + getKeysResult channels; + initGetKeysResult(&channels); getChannelsFromCommand(cmd, argv, argc, &channels); keyReference *channelref = channels.keys; for (int j = 0; j < channels.numkeys; j++) { @@ -2669,15 +2666,13 @@ void addACLLogEntry(client *c, int reason, int context, int argpos, sds username if (object) { le->object = object; } else { - /* clang-format off */ - switch(reason) { + switch (reason) { case ACL_DENIED_CMD: le->object = sdsdup(c->cmd->fullname); break; case ACL_DENIED_KEY: le->object = sdsdup(c->argv[argpos]->ptr); break; case ACL_DENIED_CHANNEL: le->object = sdsdup(c->argv[argpos]->ptr); break; case ACL_DENIED_AUTH: le->object = sdsdup(c->argv[0]->ptr); break; default: le->object = sdsempty(); } - /* clang-format on */ } /* if we have a real client from the network, use it (could be missing on module timers) */ @@ -3058,28 +3053,24 @@ void aclCommand(client *c) { addReplyBulkCString(c, "reason"); char *reasonstr; - /* clang-format off */ - switch(le->reason) { - case ACL_DENIED_CMD: reasonstr="command"; break; - case ACL_DENIED_KEY: reasonstr="key"; break; - case ACL_DENIED_CHANNEL: reasonstr="channel"; break; - case ACL_DENIED_AUTH: reasonstr="auth"; break; - default: reasonstr="unknown"; + switch (le->reason) { + case ACL_DENIED_CMD: reasonstr = "command"; break; + case ACL_DENIED_KEY: 
reasonstr = "key"; break; + case ACL_DENIED_CHANNEL: reasonstr = "channel"; break; + case ACL_DENIED_AUTH: reasonstr = "auth"; break; + default: reasonstr = "unknown"; } - /* clang-format on */ addReplyBulkCString(c, reasonstr); addReplyBulkCString(c, "context"); char *ctxstr; - /* clang-format off */ - switch(le->context) { - case ACL_LOG_CTX_TOPLEVEL: ctxstr="toplevel"; break; - case ACL_LOG_CTX_MULTI: ctxstr="multi"; break; - case ACL_LOG_CTX_LUA: ctxstr="lua"; break; - case ACL_LOG_CTX_MODULE: ctxstr="module"; break; - default: ctxstr="unknown"; + switch (le->context) { + case ACL_LOG_CTX_TOPLEVEL: ctxstr = "toplevel"; break; + case ACL_LOG_CTX_MULTI: ctxstr = "multi"; break; + case ACL_LOG_CTX_LUA: ctxstr = "lua"; break; + case ACL_LOG_CTX_MODULE: ctxstr = "module"; break; + default: ctxstr = "unknown"; } - /* clang-format on */ addReplyBulkCString(c, ctxstr); addReplyBulkCString(c, "object"); diff --git a/src/ae_kqueue.c b/src/ae_kqueue.c index 3cb6fbae4a..4159f25744 100644 --- a/src/ae_kqueue.c +++ b/src/ae_kqueue.c @@ -101,31 +101,24 @@ static void aeApiFree(aeEventLoop *eventLoop) { static int aeApiAddEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; - struct kevent ke; + struct kevent evs[2]; + int nch = 0; - if (mask & AE_READABLE) { - EV_SET(&ke, fd, EVFILT_READ, EV_ADD, 0, 0, NULL); - if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; - } - if (mask & AE_WRITABLE) { - EV_SET(&ke, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); - if (kevent(state->kqfd, &ke, 1, NULL, 0, NULL) == -1) return -1; - } - return 0; + if (mask & AE_READABLE) EV_SET(evs + nch++, fd, EVFILT_READ, EV_ADD, 0, 0, NULL); + if (mask & AE_WRITABLE) EV_SET(evs + nch++, fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL); + + return kevent(state->kqfd, evs, nch, NULL, 0, NULL); } static void aeApiDelEvent(aeEventLoop *eventLoop, int fd, int mask) { aeApiState *state = eventLoop->apidata; - struct kevent ke; + struct kevent evs[2]; + int nch = 0; - if (mask 
& AE_READABLE) { - EV_SET(&ke, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); - kevent(state->kqfd, &ke, 1, NULL, 0, NULL); - } - if (mask & AE_WRITABLE) { - EV_SET(&ke, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); - kevent(state->kqfd, &ke, 1, NULL, 0, NULL); - } + if (mask & AE_READABLE) EV_SET(evs + nch++, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL); + if (mask & AE_WRITABLE) EV_SET(evs + nch++, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL); + + kevent(state->kqfd, evs, nch, NULL, 0, NULL); } static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) { diff --git a/src/aof.c b/src/aof.c index f3538f64fa..ac9ffd5fcb 100644 --- a/src/aof.c +++ b/src/aof.c @@ -904,12 +904,12 @@ int aofFsyncInProgress(void) { /* Starts a background task that performs fsync() against the specified * file descriptor (the one of the AOF file) in another thread. */ void aof_background_fsync(int fd) { - bioCreateFsyncJob(fd, server.master_repl_offset, 1); + bioCreateFsyncJob(fd, server.primary_repl_offset, 1); } /* Close the fd on the basis of aof_background_fsync. */ void aof_background_fsync_and_close(int fd) { - bioCreateCloseAofJob(fd, server.master_repl_offset, 1); + bioCreateCloseAofJob(fd, server.primary_repl_offset, 1); } /* Kills an AOFRW child process if exists */ @@ -1069,11 +1069,12 @@ void flushAppendOnlyFile(int force) { } else { /* All data is fsync'd already: Update fsynced_reploff_pending just in case. 
* This is needed to avoid a WAITAOF hang in case a module used RM_Call with the NO_AOF flag, - * in which case master_repl_offset will increase but fsynced_reploff_pending won't be updated + * in which case primary_repl_offset will increase but fsynced_reploff_pending won't be updated * (because there's no reason, from the AOF POV, to call fsync) and then WAITAOF may wait on * the higher offset (which contains data that was only propagated to replicas, and not to AOF) */ if (!sync_in_progress && server.aof_fsync != AOF_FSYNC_NO) - atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, + memory_order_relaxed); return; } } @@ -1243,7 +1244,7 @@ void flushAppendOnlyFile(int force) { latencyAddSampleIfNeeded("aof-fsync-always", latency); server.aof_last_incr_fsync_offset = server.aof_last_incr_size; server.aof_last_fsync = server.mstime; - atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, memory_order_relaxed); } else if (server.aof_fsync == AOF_FSYNC_EVERYSEC && server.mstime - server.aof_last_fsync >= 1000) { if (!sync_in_progress) { aof_background_fsync(server.aof_fd); @@ -1355,7 +1356,7 @@ struct client *createAOFClient(void) { c->id = CLIENT_ID_AOF; /* So modules can identify it's the AOF client. */ /* - * The AOF client should never be blocked (unlike master + * The AOF client should never be blocked (unlike primary * replication connection). * This is because blocking the AOF client might cause * deadlock (because potentially no one will unblock it). 
@@ -1365,9 +1366,9 @@ struct client *createAOFClient(void) { */ c->flags = CLIENT_DENY_BLOCKING; - /* We set the fake client as a slave waiting for the synchronization + /* We set the fake client as a replica waiting for the synchronization * so that the server will not try to send replies to this client. */ - c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; + c->repl_state = REPLICA_STATE_WAIT_BGSAVE_START; return c; } @@ -1993,21 +1994,19 @@ int rioWriteStreamPendingEntry(rio *r, RETRYCOUNT JUSTID FORCE. */ streamID id; streamDecodeID(rawid, &id); - /* clang-format off */ - if (rioWriteBulkCount(r,'*',12) == 0) return 0; - if (rioWriteBulkString(r,"XCLAIM",6) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkString(r,groupname,groupname_len) == 0) return 0; - if (rioWriteBulkString(r,consumer->name,sdslen(consumer->name)) == 0) return 0; - if (rioWriteBulkString(r,"0",1) == 0) return 0; - if (rioWriteBulkStreamID(r,&id) == 0) return 0; - if (rioWriteBulkString(r,"TIME",4) == 0) return 0; - if (rioWriteBulkLongLong(r,nack->delivery_time) == 0) return 0; - if (rioWriteBulkString(r,"RETRYCOUNT",10) == 0) return 0; - if (rioWriteBulkLongLong(r,nack->delivery_count) == 0) return 0; - if (rioWriteBulkString(r,"JUSTID",6) == 0) return 0; - if (rioWriteBulkString(r,"FORCE",5) == 0) return 0; - /* clang-format on */ + if (rioWriteBulkCount(r, '*', 12) == 0) return 0; + if (rioWriteBulkString(r, "XCLAIM", 6) == 0) return 0; + if (rioWriteBulkObject(r, key) == 0) return 0; + if (rioWriteBulkString(r, groupname, groupname_len) == 0) return 0; + if (rioWriteBulkString(r, consumer->name, sdslen(consumer->name)) == 0) return 0; + if (rioWriteBulkString(r, "0", 1) == 0) return 0; + if (rioWriteBulkStreamID(r, &id) == 0) return 0; + if (rioWriteBulkString(r, "TIME", 4) == 0) return 0; + if (rioWriteBulkLongLong(r, nack->delivery_time) == 0) return 0; + if (rioWriteBulkString(r, "RETRYCOUNT", 10) == 0) return 0; + if (rioWriteBulkLongLong(r, 
nack->delivery_count) == 0) return 0; + if (rioWriteBulkString(r, "JUSTID", 6) == 0) return 0; + if (rioWriteBulkString(r, "FORCE", 5) == 0) return 0; return 1; } @@ -2020,14 +2019,12 @@ int rioWriteStreamEmptyConsumer(rio *r, size_t groupname_len, streamConsumer *consumer) { /* XGROUP CREATECONSUMER */ - /* clang-format off */ - if (rioWriteBulkCount(r,'*',5) == 0) return 0; - if (rioWriteBulkString(r,"XGROUP",6) == 0) return 0; - if (rioWriteBulkString(r,"CREATECONSUMER",14) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkString(r,groupname,groupname_len) == 0) return 0; - if (rioWriteBulkString(r,consumer->name,sdslen(consumer->name)) == 0) return 0; - /* clang-format on */ + if (rioWriteBulkCount(r, '*', 5) == 0) return 0; + if (rioWriteBulkString(r, "XGROUP", 6) == 0) return 0; + if (rioWriteBulkString(r, "CREATECONSUMER", 14) == 0) return 0; + if (rioWriteBulkObject(r, key) == 0) return 0; + if (rioWriteBulkString(r, groupname, groupname_len) == 0) return 0; + if (rioWriteBulkString(r, consumer->name, sdslen(consumer->name)) == 0) return 0; return 1; } @@ -2320,7 +2317,7 @@ int rewriteAppendOnlyFile(char *filename) { if (server.aof_use_rdb_preamble) { int error; - if (rdbSaveRio(SLAVE_REQ_NONE, &aof, &error, RDBFLAGS_AOF_PREAMBLE, NULL) == C_ERR) { + if (rdbSaveRio(REPLICA_REQ_NONE, &aof, &error, RDBFLAGS_AOF_PREAMBLE, NULL) == C_ERR) { errno = error; goto werr; } @@ -2403,12 +2400,12 @@ int rewriteAppendOnlyFileBackground(void) { * between updates to `fsynced_reploff_pending` of the worker thread, belonging * to the previous AOF, and the new one. This concern is specific for a full * sync scenario where we don't wanna risk the ACKed replication offset - * jumping backwards or forward when switching to a different master. */ + * jumping backwards or forward when switching to a different primary. 
*/ bioDrainWorker(BIO_AOF_FSYNC); /* Set the initial repl_offset, which will be applied to fsynced_reploff * when AOFRW finishes (after possibly being updated by a bio thread) */ - atomic_store_explicit(&server.fsynced_reploff_pending, server.master_repl_offset, memory_order_relaxed); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, memory_order_relaxed); server.fsynced_reploff = 0; } diff --git a/src/blocked.c b/src/blocked.c index 85ef9170a0..08abac15e3 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -86,8 +86,8 @@ void initClientBlockingState(client *c) { * flag is set client query buffer is not longer processed, but accumulated, * and will be processed when the client is unblocked. */ void blockClient(client *c, int btype) { - /* Master client should never be blocked unless pause or module */ - serverAssert(!(c->flags & CLIENT_MASTER && btype != BLOCKED_MODULE && btype != BLOCKED_POSTPONE)); + /* Primary client should never be blocked unless pause or module */ + serverAssert(!(c->flags & CLIENT_PRIMARY && btype != BLOCKED_MODULE && btype != BLOCKED_POSTPONE)); c->flags |= CLIENT_BLOCKED; c->bstate.btype = btype; @@ -265,8 +265,8 @@ void replyToClientsBlockedOnShutdown(void) { /* Mass-unblock clients because something changed in the instance that makes * blocking no longer safe. For example clients blocked in list operations - * in an instance which turns from master to slave is unsafe, so this function - * is called when a master turns into a slave. + * in an instance which turns from primary to replica is unsafe, so this function + * is called when a primary turns into a replica. * * The semantics is to send an -UNBLOCKED error to the client, disconnecting * it at the same time. 
*/ diff --git a/src/cluster.c b/src/cluster.c index 71d1cc9124..8aa6793ba8 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -813,12 +813,12 @@ void clusterCommandHelp(client *c) { " Return the node's shard id.", "NODES", " Return cluster configuration seen by node. Output format:", - " ...", + " ...", "REPLICAS ", " Return replicas.", "SLOTS", " Return information about slots range mappings. Each range is made of:", - " start, end, master and replicas IP addresses, ports and ids", + " start, end, primary and replicas IP addresses, ports and ids", "SHARDS", " Return information about slot range mappings and the nodes associated with them.", NULL}; @@ -900,7 +900,6 @@ void clusterCommand(client *c) { } kvstoreReleaseDictIterator(kvs_di); } else if ((!strcasecmp(c->argv[1]->ptr, "slaves") || !strcasecmp(c->argv[1]->ptr, "replicas")) && c->argc == 3) { - /* CLUSTER SLAVES */ /* CLUSTER REPLICAS */ clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr)); int j; @@ -911,15 +910,15 @@ void clusterCommand(client *c) { return; } - if (clusterNodeIsSlave(n)) { + if (clusterNodeIsReplica(n)) { addReplyError(c, "The specified node is not a master"); return; } /* Report TLS ports to TLS client, and report non-TLS port to non-TLS client. 
*/ - addReplyArrayLen(c, clusterNodeNumSlaves(n)); - for (j = 0; j < clusterNodeNumSlaves(n); j++) { - sds ni = clusterGenNodeDescription(c, clusterNodeGetSlave(n, j), shouldReturnTlsInfo()); + addReplyArrayLen(c, clusterNodeNumReplicas(n)); + for (j = 0; j < clusterNodeNumReplicas(n); j++) { + sds ni = clusterGenNodeDescription(c, clusterNodeGetReplica(n, j), shouldReturnTlsInfo()); addReplyBulkCString(c, ni); sdsfree(ni); } @@ -1018,7 +1017,8 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int margc = ms->commands[i].argc; margv = ms->commands[i].argv; - getKeysResult result = GETKEYS_RESULT_INIT; + getKeysResult result; + initGetKeysResult(&result); numkeys = getKeysFromCommand(mcmd, margv, margc, &result); keyindex = result.keys; @@ -1048,8 +1048,8 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int * can safely serve the request, otherwise we return a TRYAGAIN * error). To do so we set the importing/migrating state and * increment a counter for every missing key. */ - if (clusterNodeIsMaster(myself) || c->flags & CLIENT_READONLY) { - if (n == clusterNodeGetMaster(myself) && getMigratingSlotDest(slot) != NULL) { + if (clusterNodeIsPrimary(myself) || c->flags & CLIENT_READONLY) { + if (n == clusterNodeGetPrimary(myself) && getMigratingSlotDest(slot) != NULL) { migrating_slot = 1; } else if (getImportingSlotSource(slot) != NULL) { importing_slot = 1; @@ -1122,7 +1122,7 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int /* MIGRATE always works in the context of the local node if the slot * is open (migrating or importing state). We need to be able to freely * move keys among instances in this case. 
*/ - if ((migrating_slot || importing_slot) && cmd->proc == migrateCommand && clusterNodeIsMaster(myself)) { + if ((migrating_slot || importing_slot) && cmd->proc == migrateCommand && clusterNodeIsPrimary(myself)) { return myself; } @@ -1152,13 +1152,13 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int } } - /* Handle the read-only client case reading from a slave: if this - * node is a slave and the request is about a hash slot our master + /* Handle the read-only client case reading from a replica: if this + * node is a replica and the request is about a hash slot our primary * is serving, we can reply without redirection. */ int is_write_command = (cmd_flags & CMD_WRITE) || (c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_WRITE)); - if (((c->flags & CLIENT_READONLY) || pubsubshard_included) && !is_write_command && clusterNodeIsSlave(myself) && - clusterNodeGetMaster(myself) == n) { + if (((c->flags & CLIENT_READONLY) || pubsubshard_included) && !is_write_command && clusterNodeIsReplica(myself) && + clusterNodeGetPrimary(myself) == n) { return myself; } @@ -1204,7 +1204,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co * to detect timeouts, in order to handle the following case: * * 1) A client blocks with BLPOP or similar blocking operation. - * 2) The master migrates the hash slot elsewhere or turns into a slave. + * 2) The primary migrates the hash slot elsewhere or turns into a replica. * 3) The client may remain blocked forever (or up to the max timeout time) * waiting for a key change that will never happen. * @@ -1240,8 +1240,8 @@ int clusterRedirectBlockedClientIfNeeded(client *c) { /* if the client is read-only and attempting to access key that our * replica can handle, allow it. 
*/ - if ((c->flags & CLIENT_READONLY) && !(c->lastcmd->flags & CMD_WRITE) && clusterNodeIsSlave(myself) && - clusterNodeGetMaster(myself) == node) { + if ((c->flags & CLIENT_READONLY) && !(c->lastcmd->flags & CMD_WRITE) && clusterNodeIsReplica(myself) && + clusterNodeGetPrimary(myself) == node) { node = myself; } @@ -1331,9 +1331,9 @@ int isNodeAvailable(clusterNode *node) { } void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, int end_slot) { - int i, nested_elements = 3; /* slots (2) + master addr (1) */ - for (i = 0; i < clusterNodeNumSlaves(node); i++) { - if (!isNodeAvailable(clusterNodeGetSlave(node, i))) continue; + int i, nested_elements = 3; /* slots (2) + primary addr (1) */ + for (i = 0; i < clusterNodeNumReplicas(node); i++) { + if (!isNodeAvailable(clusterNodeGetReplica(node, i))) continue; nested_elements++; } addReplyArrayLen(c, nested_elements); @@ -1342,11 +1342,11 @@ void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, in addNodeToNodeReply(c, node); /* Remaining nodes in reply are replicas for slot range */ - for (i = 0; i < clusterNodeNumSlaves(node); i++) { + for (i = 0; i < clusterNodeNumReplicas(node); i++) { /* This loop is copy/pasted from clusterGenNodeDescription() * with modifications for per-slot node aggregation. 
*/ - if (!isNodeAvailable(clusterNodeGetSlave(node, i))) continue; - addNodeToNodeReply(c, clusterNodeGetSlave(node, i)); + if (!isNodeAvailable(clusterNodeGetReplica(node, i))) continue; + addNodeToNodeReply(c, clusterNodeGetReplica(node, i)); nested_elements--; } serverAssert(nested_elements == 3); /* Original 3 elements */ @@ -1364,7 +1364,7 @@ void clearCachedClusterSlotsResponse(void) { sds generateClusterSlotResponse(void) { client *recording_client = createCachedResponseClient(); clusterNode *n = NULL; - int num_masters = 0, start = -1; + int num_primaries = 0, start = -1; void *slot_replylen = addReplyDeferredLen(recording_client); for (int i = 0; i <= CLUSTER_SLOTS; i++) { @@ -1380,13 +1380,13 @@ sds generateClusterSlotResponse(void) { * or end of slot. */ if (i == CLUSTER_SLOTS || n != getNodeBySlot(i)) { addNodeReplyForClusterSlot(recording_client, n, start, i - 1); - num_masters++; + num_primaries++; if (i == CLUSTER_SLOTS) break; n = getNodeBySlot(i); start = i; } } - setDeferredArrayLen(recording_client, slot_replylen, num_masters); + setDeferredArrayLen(recording_client, slot_replylen, num_primaries); sds cluster_slot_response = aggregateClientOutputBuffer(recording_client); deleteCachedResponseClient(recording_client); return cluster_slot_response; @@ -1405,8 +1405,8 @@ int verifyCachedClusterSlotsResponse(sds cached_response) { void clusterCommandSlots(client *c) { /* Format: 1) 1) start slot * 2) end slot - * 3) 1) master IP - * 2) master port + * 3) 1) primary IP + * 2) primary port * 3) node ID * 4) 1) replica IP * 2) replica port @@ -1446,8 +1446,8 @@ void askingCommand(client *c) { } /* The READONLY command is used by clients to enter the read-only mode. - * In this mode slaves will not redirect clients as long as clients access - * with read-only commands to keys that are served by the slave's master. 
*/ + * In this mode replica will not redirect clients as long as clients access + * with read-only commands to keys that are served by the replica's primary. */ void readonlyCommand(client *c) { if (server.cluster_enabled == 0) { addReplyError(c, "This instance has cluster support disabled"); diff --git a/src/cluster.h b/src/cluster.h index de58486440..f163e7f688 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -67,7 +67,7 @@ int clusterCommandSpecial(client *c); const char **clusterCommandExtendedHelp(void); int clusterAllowFailoverCmd(client *c); -void clusterPromoteSelfToMaster(void); +void clusterPromoteSelfToPrimary(void); int clusterManualFailoverTimeLimit(void); void clusterCommandSlots(client *c); @@ -83,18 +83,18 @@ int getClusterSize(void); int getMyShardSlotCount(void); int handleDebugClusterCommand(client *c); int clusterNodePending(clusterNode *node); -int clusterNodeIsMaster(clusterNode *n); +int clusterNodeIsPrimary(clusterNode *n); char **getClusterNodesList(size_t *numnodes); char *clusterNodeIp(clusterNode *node); -int clusterNodeIsSlave(clusterNode *node); -clusterNode *clusterNodeGetMaster(clusterNode *node); +int clusterNodeIsReplica(clusterNode *node); +clusterNode *clusterNodeGetPrimary(clusterNode *node); char *clusterNodeGetName(clusterNode *node); int clusterNodeTimedOut(clusterNode *node); int clusterNodeIsFailing(clusterNode *node); int clusterNodeIsNoFailover(clusterNode *node); char *clusterNodeGetShardId(clusterNode *node); -int clusterNodeNumSlaves(clusterNode *node); -clusterNode *clusterNodeGetSlave(clusterNode *node, int slave_idx); +int clusterNodeNumReplicas(clusterNode *node); +clusterNode *clusterNodeGetReplica(clusterNode *node, int slave_idx); clusterNode *getMigratingSlotDest(int slot); clusterNode *getImportingSlotSource(int slot); clusterNode *getNodeBySlot(int slot); @@ -103,7 +103,6 @@ char *clusterNodeHostname(clusterNode *node); const char *clusterNodePreferredEndpoint(clusterNode *n); long long 
clusterNodeReplOffset(clusterNode *node); clusterNode *clusterLookupNode(const char *name, int length); -void clusterReplicateOpenSlots(void); int detectAndUpdateCachedNodeHealth(void); client *createCachedResponseClient(void); void deleteCachedResponseClient(client *recording_client); diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 0de6351e90..9482f02167 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -63,14 +63,14 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request); void clusterUpdateState(void); int clusterNodeCoversSlot(clusterNode *n, int slot); list *clusterGetNodesInMyShard(clusterNode *node); -int clusterNodeAddSlave(clusterNode *master, clusterNode *slave); +int clusterNodeAddReplica(clusterNode *primary, clusterNode *replica); int clusterAddSlot(clusterNode *n, int slot); int clusterDelSlot(int slot); int clusterDelNodeSlots(clusterNode *node); int clusterNodeSetSlotBit(clusterNode *n, int slot); -void clusterSetMaster(clusterNode *n, int closeSlots); -void clusterHandleSlaveFailover(void); -void clusterHandleSlaveMigration(int max_slaves); +void clusterSetPrimary(clusterNode *n, int closeSlots); +void clusterHandleReplicaFailover(void); +void clusterHandleReplicaMigration(int max_replicas); int bitmapTestBit(unsigned char *bitmap, int pos); void bitmapSetBit(unsigned char *bitmap, int pos); void bitmapClearBit(unsigned char *bitmap, int pos); @@ -78,7 +78,7 @@ void clusterDoBeforeSleep(int flags); void clusterSendUpdate(clusterLink *link, clusterNode *node); void resetManualFailover(void); void clusterCloseAllSlots(void); -void clusterSetNodeAsMaster(clusterNode *n); +void clusterSetNodeAsPrimary(clusterNode *n); void clusterDelNode(clusterNode *delnode); sds representClusterNodeFlags(sds ci, uint16_t flags); sds representSlotInfo(sds ci, uint16_t *slot_info_pairs, int slot_info_pairs_count); @@ -113,6 +113,14 @@ int auxTlsPortPresent(clusterNode *n); static void clusterBuildMessageHdr(clusterMsg 
*hdr, int type, size_t msglen); void freeClusterLink(clusterLink *link); int verifyClusterNodeId(const char *name, int length); +sds clusterEncodeOpenSlotsAuxField(int rdbflags); +int clusterDecodeOpenSlotsAuxField(int rdbflags, sds s); + +/* Only primaries that own slots have voting rights. + * Returns 1 if the node has voting rights, otherwise returns 0. */ +static inline int clusterNodeIsVotingPrimary(clusterNode *n) { + return (n->flags & CLUSTER_NODE_PRIMARY) && n->numslots; +} int getNodeDefaultClientPort(clusterNode *n) { return server.tls_cluster ? n->tls_port : n->tcp_port; @@ -144,7 +152,6 @@ static inline int defaultClientPort(void) { dictType clusterNodesDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -157,7 +164,6 @@ dictType clusterNodesDictType = { dictType clusterNodesBlackListDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -168,7 +174,6 @@ dictType clusterNodesBlackListDictType = { dictType clusterSdsToListType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor, /* val destructor */ @@ -227,8 +232,8 @@ int auxShardIdSetter(clusterNode *n, void *value, int length) { memcpy(n->shard_id, value, CLUSTER_NAMELEN); /* if n already has replicas, make sure they all agree * on the shard id */ - for (int i = 0; i < n->numslaves; i++) { - if (memcmp(n->slaves[i]->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { + for (int i = 0; i < n->num_replicas; i++) { + if (memcmp(n->replicas[i]->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { return C_ERR; } } @@ -361,7 +366,7 @@ int clusterLoadConfig(char *filename) { while (fgets(line, maxline, fp) != 
NULL) { int argc, aux_argc; sds *argv, *aux_argv; - clusterNode *n, *master; + clusterNode *n, *primary; char *p, *s; /* Skip blank lines, they can be created either by users manually @@ -528,10 +533,10 @@ int clusterLoadConfig(char *filename) { serverAssert(server.cluster->myself == NULL); myself = server.cluster->myself = n; n->flags |= CLUSTER_NODE_MYSELF; - } else if (!strcasecmp(s, "master")) { - n->flags |= CLUSTER_NODE_MASTER; - } else if (!strcasecmp(s, "slave")) { - n->flags |= CLUSTER_NODE_SLAVE; + } else if (!strcasecmp(s, "master") || !strcasecmp(s, "primary")) { + n->flags |= CLUSTER_NODE_PRIMARY; + } else if (!strcasecmp(s, "slave") || !strcasecmp(s, "replica")) { + n->flags |= CLUSTER_NODE_REPLICA; } else if (!strcasecmp(s, "fail?")) { n->flags |= CLUSTER_NODE_PFAIL; } else if (!strcasecmp(s, "fail")) { @@ -546,37 +551,37 @@ int clusterLoadConfig(char *filename) { } else if (!strcasecmp(s, "noflags")) { /* nothing to do */ } else { - serverPanic("Unknown flag in redis cluster config file"); + serverPanic("Unknown flag in %s cluster config file", SERVER_TITLE); } if (p) s = p + 1; } - /* Get master if any. Set the master and populate master's - * slave list. */ + /* Get primary if any. Set the primary and populate primary's + * replica list. 
*/ if (argv[3][0] != '-') { if (verifyClusterNodeId(argv[3], sdslen(argv[3])) == C_ERR) { sdsfreesplitres(argv, argc); goto fmterr; } - master = clusterLookupNode(argv[3], sdslen(argv[3])); - if (!master) { - master = createClusterNode(argv[3], 0); - clusterAddNode(master); + primary = clusterLookupNode(argv[3], sdslen(argv[3])); + if (!primary) { + primary = createClusterNode(argv[3], 0); + clusterAddNode(primary); } /* shard_id can be absent if we are loading a nodes.conf generated - * by an older version of Redis; we should follow the primary's + * by an older version; we should follow the primary's * shard_id in this case */ if (auxFieldHandlers[af_shard_id].isPresent(n) == 0) { - memcpy(n->shard_id, master->shard_id, CLUSTER_NAMELEN); - clusterAddNodeToShard(master->shard_id, n); - } else if (clusterGetNodesInMyShard(master) != NULL && - memcmp(master->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { + memcpy(n->shard_id, primary->shard_id, CLUSTER_NAMELEN); + clusterAddNodeToShard(primary->shard_id, n); + } else if (clusterGetNodesInMyShard(primary) != NULL && + memcmp(primary->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { /* If the primary has been added to a shard, make sure this * node has the same persisted shard id as the primary. */ goto fmterr; } - n->slaveof = master; - clusterNodeAddSlave(master, n); + n->replicaof = primary; + clusterNodeAddReplica(primary, n); } else if (auxFieldHandlers[af_shard_id].isPresent(n) == 0) { /* n is a primary but it does not have a persisted shard_id. * This happens if we are loading a nodes.conf generated by @@ -592,7 +597,7 @@ int clusterLoadConfig(char *filename) { /* Set configEpoch for this node. * If the node is a replica, set its config epoch to 0. * If it's a primary, load the config epoch from the configuration file. */ - n->configEpoch = (nodeIsSlave(n) && n->slaveof) ? 0 : strtoull(argv[6], NULL, 10); + n->configEpoch = (nodeIsReplica(n) && n->replicaof) ? 
0 : strtoull(argv[6], NULL, 10); /* Populate hash slots served by this instance. */ for (j = 8; j < argc; j++) { @@ -831,7 +836,7 @@ void deriveAnnouncedPorts(int *announced_tcp_port, int *announced_tls_port, int void clusterUpdateMyselfFlags(void) { if (!myself) return; int oldflags = myself->flags; - int nofailover = server.cluster_slave_no_failover ? CLUSTER_NODE_NOFAILOVER : 0; + int nofailover = server.cluster_replica_no_failover ? CLUSTER_NODE_NOFAILOVER : 0; myself->flags &= ~CLUSTER_NODE_NOFAILOVER; myself->flags |= nofailover; if (myself->flags != oldflags) { @@ -919,7 +924,7 @@ static void updateShardId(clusterNode *node, const char *shard_id) { clusterAddNodeToShard(shard_id, node); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } - if (shard_id && myself != node && myself->slaveof == node) { + if (shard_id && myself != node && myself->replicaof == node) { if (memcmp(myself->shard_id, shard_id, CLUSTER_NAMELEN) != 0) { /* shard-id can diverge right after a rolling upgrade * from pre-7.2 releases */ @@ -936,7 +941,7 @@ static inline int areInSameShard(clusterNode *node1, clusterNode *node2) { } static inline uint64_t nodeEpoch(clusterNode *n) { - return n->slaveof ? n->slaveof->configEpoch : n->configEpoch; + return n->replicaof ? n->replicaof->configEpoch : n->configEpoch; } /* Update my hostname based on server configuration values */ @@ -991,7 +996,7 @@ void clusterInit(void) { if (clusterLoadConfig(server.cluster_configfile) == C_ERR) { /* No configuration found. We will just use the random name provided * by the createClusterNode() function. 
*/ - myself = server.cluster->myself = createClusterNode(NULL, CLUSTER_NODE_MYSELF | CLUSTER_NODE_MASTER); + myself = server.cluster->myself = createClusterNode(NULL, CLUSTER_NODE_MYSELF | CLUSTER_NODE_PRIMARY); serverLog(LL_NOTICE, "No cluster configuration found, I'm %.40s", myself->name); clusterAddNode(myself); clusterAddNodeToShard(myself->shard_id, myself); @@ -1017,12 +1022,16 @@ void clusterInit(void) { exit(1); } + /* Register our own rdb aux fields */ + serverAssert(rdbRegisterAuxField("cluster-slot-states", clusterEncodeOpenSlotsAuxField, + clusterDecodeOpenSlotsAuxField) == C_OK); + /* Set myself->port/cport/pport to my listening ports, we'll just need to * discover the IP address via MEET messages. */ deriveAnnouncedPorts(&myself->tcp_port, &myself->tls_port, &myself->cport); server.cluster->mf_end = 0; - server.cluster->mf_slave = NULL; + server.cluster->mf_replica = NULL; for (connTypeForCaching conn_type = CACHE_CONN_TCP; conn_type < CACHE_CONN_TYPE_MAX; conn_type++) { server.cached_cluster_slot_info[conn_type] = NULL; } @@ -1062,20 +1071,20 @@ void clusterInitLast(void) { * * 1) All other nodes are forgotten. * 2) All the assigned / open slots are released. - * 3) If the node is a slave, it turns into a master. + * 3) If the node is a replica, it turns into a primary. * 4) Only for hard reset: a new Node ID is generated. * 5) Only for hard reset: currentEpoch and configEpoch are set to 0. * 6) The new configuration is saved and the cluster state updated. - * 7) If the node was a slave, the whole data set is flushed away. */ + * 7) If the node was a replica, the whole data set is flushed away. */ void clusterReset(int hard) { dictIterator *di; dictEntry *de; int j; - /* Turn into master. */ - if (nodeIsSlave(myself)) { - clusterSetNodeAsMaster(myself); - replicationUnsetMaster(); + /* Turn into primary. 
*/ + if (nodeIsReplica(myself)) { + clusterSetNodeAsPrimary(myself); + replicationUnsetPrimary(); emptyData(-1, EMPTYDB_NO_FLAGS, NULL); } @@ -1250,7 +1259,7 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* If the server is starting up, don't accept cluster connections: * UPDATE messages may interact with the database content. */ - if (server.masterhost == NULL && server.loading) return; + if (server.primary_host == NULL && server.loading) return; while (max--) { cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); @@ -1321,9 +1330,9 @@ clusterNode *createClusterNode(char *nodename, int flags) { node->slot_info_pairs = NULL; node->slot_info_pairs_count = 0; node->numslots = 0; - node->numslaves = 0; - node->slaves = NULL; - node->slaveof = NULL; + node->num_replicas = 0; + node->replicas = NULL; + node->replicaof = NULL; node->last_in_ping_gossip = 0; node->ping_sent = node->pong_received = 0; node->data_received = 0; @@ -1444,43 +1453,44 @@ static int clusterNodeNameComparator(const void *node1, const void *node2) { return strncasecmp((*(clusterNode **)node1)->name, (*(clusterNode **)node2)->name, CLUSTER_NAMELEN); } -int clusterNodeRemoveSlave(clusterNode *master, clusterNode *slave) { +int clusterNodeRemoveReplica(clusterNode *primary, clusterNode *replica) { int j; - for (j = 0; j < master->numslaves; j++) { - if (master->slaves[j] == slave) { - if ((j + 1) < master->numslaves) { - int remaining_slaves = (master->numslaves - j) - 1; - memmove(master->slaves + j, master->slaves + (j + 1), (sizeof(*master->slaves) * remaining_slaves)); + for (j = 0; j < primary->num_replicas; j++) { + if (primary->replicas[j] == replica) { + if ((j + 1) < primary->num_replicas) { + int remaining_replicas = (primary->num_replicas - j) - 1; + memmove(primary->replicas + j, primary->replicas + (j + 1), + (sizeof(*primary->replicas) * remaining_replicas)); } - master->numslaves--; - if (master->numslaves == 0) master->flags &= 
~CLUSTER_NODE_MIGRATE_TO; + primary->num_replicas--; + if (primary->num_replicas == 0) primary->flags &= ~CLUSTER_NODE_MIGRATE_TO; return C_OK; } } return C_ERR; } -int clusterNodeAddSlave(clusterNode *master, clusterNode *slave) { +int clusterNodeAddReplica(clusterNode *primary, clusterNode *replica) { int j; - /* If it's already a slave, don't add it again. */ - for (j = 0; j < master->numslaves; j++) - if (master->slaves[j] == slave) return C_ERR; - master->slaves = zrealloc(master->slaves, sizeof(clusterNode *) * (master->numslaves + 1)); - master->slaves[master->numslaves] = slave; - master->numslaves++; - qsort(master->slaves, master->numslaves, sizeof(clusterNode *), clusterNodeNameComparator); - master->flags |= CLUSTER_NODE_MIGRATE_TO; + /* If it's already a replica, don't add it again. */ + for (j = 0; j < primary->num_replicas; j++) + if (primary->replicas[j] == replica) return C_ERR; + primary->replicas = zrealloc(primary->replicas, sizeof(clusterNode *) * (primary->num_replicas + 1)); + primary->replicas[primary->num_replicas] = replica; + primary->num_replicas++; + qsort(primary->replicas, primary->num_replicas, sizeof(clusterNode *), clusterNodeNameComparator); + primary->flags |= CLUSTER_NODE_MIGRATE_TO; return C_OK; } -int clusterCountNonFailingSlaves(clusterNode *n) { - int j, okslaves = 0; +int clusterCountNonFailingReplicas(clusterNode *n) { + int j, ok_replicas = 0; - for (j = 0; j < n->numslaves; j++) - if (!nodeFailed(n->slaves[j])) okslaves++; - return okslaves; + for (j = 0; j < n->num_replicas; j++) + if (!nodeFailed(n->replicas[j])) ok_replicas++; + return ok_replicas; } /* Low level cleanup of the node structure. Only called by clusterDelNode(). */ @@ -1488,12 +1498,12 @@ void freeClusterNode(clusterNode *n) { sds nodename; int j; - /* If the node has associated slaves, we have to set - * all the slaves->slaveof fields to NULL (unknown). 
*/ - for (j = 0; j < n->numslaves; j++) n->slaves[j]->slaveof = NULL; + /* If the node has associated replicas, we have to set + * all the replicas->replicaof fields to NULL (unknown). */ + for (j = 0; j < n->num_replicas; j++) n->replicas[j]->replicaof = NULL; - /* Remove this node from the list of slaves of its master. */ - if (nodeIsSlave(n) && n->slaveof) clusterNodeRemoveSlave(n->slaveof, n); + /* Remove this node from the list of replicas of its primary. */ + if (nodeIsReplica(n) && n->replicaof) clusterNodeRemoveReplica(n->replicaof, n); /* Unlink from the set of nodes. */ nodename = sdsnewlen(n->name, CLUSTER_NAMELEN); @@ -1506,7 +1516,7 @@ void freeClusterNode(clusterNode *n) { if (n->link) freeClusterLink(n->link); if (n->inbound_link) freeClusterLink(n->inbound_link); listRelease(n->fail_reports); - zfree(n->slaves); + zfree(n->replicas); zfree(n); } @@ -1527,8 +1537,8 @@ void clusterAddNode(clusterNode *node) { * other nodes. * 3) Remove the node from the owning shard * 4) Free the node with freeClusterNode() that will in turn remove it - * from the hash table and from the list of slaves of its master, if - * it is a slave node. + * from the hash table and from the list of replicas of its primary, if + * it is a replica node. */ void clusterDelNode(clusterNode *delnode) { int j; @@ -1571,7 +1581,7 @@ clusterNode *clusterLookupNode(const char *name, int length) { /* Get all the nodes in my shard. * Note that the list returned is not computed on the fly - * via slaveof; rather, it is maintained permanently to + * via replicaof; rather, it is maintained permanently to * track the shard membership and its life cycle is tied * to this process. Therefore, the caller must not * release the list. */ @@ -1671,8 +1681,8 @@ uint64_t clusterGetMaxEpoch(void) { * * 1) When slots are closed after importing. Otherwise resharding would be * too expensive. 
- * 2) When CLUSTER FAILOVER is called with options that force a slave to - * failover its master even if there is not master majority able to + * 2) When CLUSTER FAILOVER is called with options that force a replica to + * failover its primary even if there is not primary majority able to * create a new configuration epoch. * * The cluster will not explode using this function, even in the case of @@ -1695,14 +1705,14 @@ int clusterBumpConfigEpochWithoutConsensus(void) { } } -/* This function is called when this node is a master, and we receive from - * another master a configuration epoch that is equal to our configuration +/* This function is called when this node is a primary, and we receive from + * another primary a configuration epoch that is equal to our configuration * epoch. * * BACKGROUND * - * It is not possible that different slaves get the same config - * epoch during a failover election, because the slaves need to get voted + * It is not possible that different replicas get the same config + * epoch during a failover election, because the replicas need to get voted * by a majority. However when we perform a manual resharding of the cluster * the node will assign a configuration epoch to itself without to ask * for agreement. Usually resharding happens when the cluster is working well @@ -1721,13 +1731,13 @@ int clusterBumpConfigEpochWithoutConsensus(void) { * end with a different configEpoch at startup automatically. * * In all the cases, we want a mechanism that resolves this issue automatically - * as a safeguard. The same configuration epoch for masters serving different + * as a safeguard. The same configuration epoch for primaries serving different * set of slots is not harmful, but it is if the nodes end serving the same * slots for some reason (manual errors or software bugs) without a proper * failover procedure. 
* * In general we want a system that eventually always ends with different - * masters having different configuration epochs whatever happened, since + * primaries having different configuration epochs whatever happened, since * nothing is worse than a split-brain condition in a distributed system. * * BEHAVIOR @@ -1742,8 +1752,8 @@ int clusterBumpConfigEpochWithoutConsensus(void) { * end with a different configuration epoch. */ void clusterHandleConfigEpochCollision(clusterNode *sender) { - /* Prerequisites: nodes have the same configEpoch and are both masters. */ - if (sender->configEpoch != myself->configEpoch || !clusterNodeIsMaster(sender) || !clusterNodeIsMaster(myself)) + /* Prerequisites: nodes have the same configEpoch and are both primaries. */ + if (sender->configEpoch != myself->configEpoch || !clusterNodeIsPrimary(sender) || !clusterNodeIsPrimary(myself)) return; /* Don't act if the colliding node has a smaller Node ID. */ if (memcmp(sender->name, myself->name, CLUSTER_NAMELEN) <= 0) return; @@ -1751,10 +1761,8 @@ void clusterHandleConfigEpochCollision(clusterNode *sender) { server.cluster->currentEpoch++; myself->configEpoch = server.cluster->currentEpoch; clusterSaveConfigOrDie(1); - serverLog(LL_VERBOSE, - "WARNING: configEpoch collision with node %.40s (%s)." - " configEpoch set to %llu", - sender->name, sender->human_nodename, (unsigned long long)myself->configEpoch); + serverLog(LL_NOTICE, "configEpoch collision with node %.40s (%s). configEpoch set to %llu", sender->name, + sender->human_nodename, (unsigned long long)myself->configEpoch); } /* ----------------------------------------------------------------------------- @@ -1837,8 +1845,8 @@ int clusterBlacklistExists(char *nodeid) { /* This function checks if a given node should be marked as FAIL. * It happens if the following conditions are met: * - * 1) We received enough failure reports from other master nodes via gossip. 
- * Enough means that the majority of the masters signaled the node is + * 1) We received enough failure reports from other primary nodes via gossip. + * Enough means that the majority of the primaries signaled the node is * down recently. * 2) We believe this node is in PFAIL state. * @@ -1846,13 +1854,13 @@ int clusterBlacklistExists(char *nodeid) { * event trying to force every other node to set the FAIL flag for the node. * * Note that the form of agreement used here is weak, as we collect the majority - * of masters state during some time, and even if we force agreement by + * of primaries state during some time, and even if we force agreement by * propagating the FAIL message, because of partitions we may not reach every * node. However: * * 1) Either we reach the majority and eventually the FAIL state will propagate * to all the cluster. - * 2) Or there is no majority so no slave promotion will be authorized and the + * 2) Or there is no majority so no replica promotion will be authorized and the * FAIL flag will be cleared after some time. */ void markNodeAsFailingIfNeeded(clusterNode *node) { @@ -1863,9 +1871,9 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { if (nodeFailed(node)) return; /* Already FAILing. */ failures = clusterNodeFailureReportsCount(node); - /* Also count myself as a voter if I'm a master. */ - if (clusterNodeIsMaster(myself)) failures++; - if (failures < needed_quorum) return; /* No weak agreement from masters. */ + /* Also count myself as a voter if I'm a voting primary. */ + if (clusterNodeIsVotingPrimary(myself)) failures++; + if (failures < needed_quorum) return; /* No weak agreement from primaries. */ serverLog(LL_NOTICE, "Marking node %.40s (%s) as failing (quorum reached).", node->name, node->human_nodename); @@ -1876,8 +1884,8 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { /* Broadcast the failing node name to everybody, forcing all the other * reachable nodes to flag the node as FAIL. 
- * We do that even if this node is a replica and not a master: anyway - * the failing state is triggered collecting failure reports from masters, + * We do that even if this node is a replica and not a primary: anyway + * the failing state is triggered collecting failure reports from primaries, * so here the replica is only helping propagating this status. */ clusterSendFail(node->name); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); @@ -1891,20 +1899,20 @@ void clearNodeFailureIfNeeded(clusterNode *node) { serverAssert(nodeFailed(node)); - /* For slaves we always clear the FAIL flag if we can contact the + /* For replicas we always clear the FAIL flag if we can contact the * node again. */ - if (nodeIsSlave(node) || node->numslots == 0) { + if (nodeIsReplica(node) || node->numslots == 0) { serverLog(LL_NOTICE, "Clear FAIL state for node %.40s (%s):%s is reachable again.", node->name, - node->human_nodename, nodeIsSlave(node) ? "replica" : "master without slots"); + node->human_nodename, nodeIsReplica(node) ? "replica" : "primary without slots"); node->flags &= ~CLUSTER_NODE_FAIL; clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); } - /* If it is a master and... + /* If it is a primary and... * 1) The FAIL state is old enough. * 2) It is yet serving slots from our point of view (not failed over). * Apparently no one is going to fix these slots, clear the FAIL flag. */ - if (clusterNodeIsMaster(node) && node->numslots > 0 && + if (clusterNodeIsVotingPrimary(node) && (now - node->fail_time) > (server.cluster_node_timeout * CLUSTER_FAIL_UNDO_TIME_MULT)) { serverLog( LL_NOTICE, @@ -2086,17 +2094,17 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { /* Ignore gossips about self. */ if (node && node != myself) { /* We already know this node. - Handle failure reports, only when the sender is a master. 
*/ - if (sender && clusterNodeIsMaster(sender)) { + Handle failure reports, only when the sender is a voting primary. */ + if (sender && clusterNodeIsVotingPrimary(sender)) { if (flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) { if (clusterNodeAddFailureReport(node, sender)) { - serverLog(LL_VERBOSE, "Node %.40s (%s) reported node %.40s (%s) as not reachable.", - sender->name, sender->human_nodename, node->name, node->human_nodename); + serverLog(LL_NOTICE, "Node %.40s (%s) reported node %.40s (%s) as not reachable.", sender->name, + sender->human_nodename, node->name, node->human_nodename); } markNodeAsFailingIfNeeded(node); } else { if (clusterNodeDelFailureReport(node, sender)) { - serverLog(LL_VERBOSE, "Node %.40s (%s) reported node %.40s (%s) is back online.", sender->name, + serverLog(LL_NOTICE, "Node %.40s (%s) reported node %.40s (%s) is back online.", sender->name, sender->human_nodename, node->name, node->human_nodename); } } @@ -2227,32 +2235,32 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, clusterMsg * serverLog(LL_NOTICE, "Address updated for node %.40s (%s), now %s:%d", node->name, node->human_nodename, node->ip, getNodeDefaultClientPort(node)); - /* Check if this is our master and we have to change the + /* Check if this is our primary and we have to change the * replication target as well. */ - if (nodeIsSlave(myself) && myself->slaveof == node) - replicationSetMaster(node->ip, getNodeDefaultReplicationPort(node)); + if (nodeIsReplica(myself) && myself->replicaof == node) + replicationSetPrimary(node->ip, getNodeDefaultReplicationPort(node)); return 1; } -/* Reconfigure the specified node 'n' as a master. This function is called when - * a node that we believed to be a slave is now acting as master in order to +/* Reconfigure the specified node 'n' as a primary. This function is called when + * a node that we believed to be a replica is now acting as primary in order to * update the state of the node. 
*/ -void clusterSetNodeAsMaster(clusterNode *n) { - if (clusterNodeIsMaster(n)) return; +void clusterSetNodeAsPrimary(clusterNode *n) { + if (clusterNodeIsPrimary(n)) return; - if (n->slaveof) { - clusterNodeRemoveSlave(n->slaveof, n); + if (n->replicaof) { + clusterNodeRemoveReplica(n->replicaof, n); if (n != myself) n->flags |= CLUSTER_NODE_MIGRATE_TO; } - n->flags &= ~CLUSTER_NODE_SLAVE; - n->flags |= CLUSTER_NODE_MASTER; - n->slaveof = NULL; + n->flags &= ~CLUSTER_NODE_REPLICA; + n->flags |= CLUSTER_NODE_PRIMARY; + n->replicaof = NULL; /* Update config and state. */ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE); } -/* This function is called when we receive a master configuration via a +/* This function is called when we receive a primary configuration via a * PING, PONG or UPDATE packet. What we receive is a node, a configEpoch of the * node, and the set of slots claimed under this configEpoch. * @@ -2265,27 +2273,27 @@ void clusterSetNodeAsMaster(clusterNode *n) { * case we receive the info via an UPDATE packet. */ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoch, unsigned char *slots) { int j; - clusterNode *curmaster = NULL, *newmaster = NULL; + clusterNode *cur_primary = NULL, *new_primary = NULL; /* The dirty slots list is a list of slots for which we lose the ownership * while having still keys inside. This usually happens after a failover * or after a manual cluster reconfiguration operated by the admin. * - * If the update message is not able to demote a master to slave (in this - * case we'll resync with the master updating the whole key space), we + * If the update message is not able to demote a primary to replica (in this + * case we'll resync with the primary updating the whole key space), we * need to delete all the keys in the slots we lost ownership. */ uint16_t dirty_slots[CLUSTER_SLOTS]; int dirty_slots_count = 0; - /* We should detect if sender is new master of our shard. 
+ /* We should detect if sender is new primary of our shard. * We will know it if all our slots were migrated to sender, and sender * has no slots except ours */ int sender_slots = 0; int migrated_our_slots = 0; - /* Here we set curmaster to this node or the node this node - * replicates to if it's a slave. In the for loop we are - * interested to check if slots are taken away from curmaster. */ - curmaster = clusterNodeIsMaster(myself) ? myself : myself->slaveof; + /* Here we set cur_primary to this node or the node this node + * replicates to if it's a replica. In the for loop we are + * interested to check if slots are taken away from cur_primary. */ + cur_primary = clusterNodeIsPrimary(myself) ? myself : myself->replicaof; if (sender == myself) { serverLog(LL_NOTICE, "Discarding UPDATE message about myself."); @@ -2322,8 +2330,8 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc dirty_slots_count++; } - if (server.cluster->slots[j] == curmaster) { - newmaster = sender; + if (server.cluster->slots[j] == cur_primary) { + new_primary = sender; migrated_our_slots++; } @@ -2389,7 +2397,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * sender if it has just taken over the primary role. */ if (server.cluster->migrating_slots_to[j] != NULL && server.cluster->migrating_slots_to[j] != sender && (server.cluster->migrating_slots_to[j]->configEpoch < senderConfigEpoch || - nodeIsSlave(server.cluster->migrating_slots_to[j])) && + nodeIsReplica(server.cluster->migrating_slots_to[j])) && areInSameShard(server.cluster->migrating_slots_to[j], sender)) { serverLog(LL_NOTICE, "Failover occurred in migration target." @@ -2415,7 +2423,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * 1. Remove the importing state for the specific slot. * 2. Finalize the slot's ownership, if I am not already the owner of * the slot. 
*/ - if (nodeIsMaster(myself) && server.cluster->importing_slots_from[j] == sender) { + if (nodeIsPrimary(myself) && server.cluster->importing_slots_from[j] == sender) { serverLog(LL_NOTICE, "Slot %d is no longer being imported from node %.40s (%s) in shard %.40s;" " Clear my importing source for the slot.", @@ -2450,13 +2458,13 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * keys redirections. */ if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return; - /* Handle a special case where newmaster is not set but both sender + /* Handle a special case where new_primary is not set but both sender * and myself own no slots and in the same shard. Set the sender as * the new primary if my current config epoch is lower than the * sender's. */ - if (!newmaster && myself->slaveof != sender && sender_slots == 0 && myself->numslots == 0 && + if (!new_primary && myself->replicaof != sender && sender_slots == 0 && myself->numslots == 0 && nodeEpoch(myself) < senderConfigEpoch && areInSameShard(sender, myself)) { - newmaster = sender; + new_primary = sender; } /* If the shard to which this node (myself) belongs loses all of @@ -2478,7 +2486,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * shard and our primary just had its last slot migrated to the * sender. In this case we don't reconfigure ourselves as a replica * of the sender. */ - if (newmaster && curmaster->numslots == 0) { + if (new_primary && cur_primary->numslots == 0) { if (server.cluster_allow_replica_migration || areInSameShard(sender, myself)) { serverLog(LL_NOTICE, "Configuration change detected. Reconfiguring myself " @@ -2486,7 +2494,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc sender->name, sender->human_nodename, sender->shard_id); /* Don't clear the migrating/importing states if this is a replica that * just gets promoted to the new primary in the shard. 
*/ - clusterSetMaster(sender, !areInSameShard(sender, myself)); + clusterSetPrimary(sender, !areInSameShard(sender, myself)); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } else if ((sender_slots >= migrated_our_slots) && !areInSameShard(sender, myself)) { /* When all our slots are lost to the sender and the sender belongs to @@ -2494,14 +2502,14 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * migration. Don't reconfigure this node to migrate to the new shard * in this case. */ serverLog(LL_NOTICE, - "My last slot was migrated to node %.40s (%s) in shard %.40s. I am now an empty master.", + "My last slot was migrated to node %.40s (%s) in shard %.40s. I am now an empty primary.", sender->name, sender->human_nodename, sender->shard_id); } } else if (dirty_slots_count) { /* If we are here, we received an update message which removed * ownership for certain slots we still have keys about, but still - * we are serving some slots, so this master node was not demoted to - * a slave. + * we are serving some slots, so this primary node was not demoted to + * a replica. * * In order to maintain a consistent state between keys and slots * we need to remove all the keys from the slots we lost. */ @@ -2575,10 +2583,6 @@ void *preparePingExt(clusterMsgPingExt *ext, uint16_t type, uint32_t length) { return &ext->ext[0]; } -clusterMsgPingExt *nextPingExt(clusterMsgPingExt *ext) { - return (clusterMsgPingExt *)((char *)ext + ntohl(ext->length)); -} - /* 1. If a NULL hdr is provided, compute the extension size; * 2. If a non-NULL hdr is provided, write the hostname ping * extension at the start of the cursor. 
This function @@ -2603,7 +2607,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { memcpy(ext->hostname, myself->hostname, sdslen(myself->hostname)); /* Move the write cursor */ - cursor = nextPingExt(cursor); + cursor = getNextPingExt(cursor); } totlen += getHostnamePingExtSize(); @@ -2618,7 +2622,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { memcpy(ext->human_nodename, myself->human_nodename, sdslen(myself->human_nodename)); /* Move the write cursor */ - cursor = nextPingExt(cursor); + cursor = getNextPingExt(cursor); } totlen += getHumanNodenamePingExtSize(); @@ -2640,7 +2644,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { ext->ttl = htonu64(ttl); /* Move the write cursor */ - cursor = nextPingExt(cursor); + cursor = getNextPingExt(cursor); } totlen += getForgottenNodeExtSize(); extensions++; @@ -2654,7 +2658,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { memcpy(ext->shard_id, myself->shard_id, CLUSTER_NAMELEN); /* Move the write cursor */ - cursor = nextPingExt(cursor); + cursor = getNextPingExt(cursor); } totlen += getShardIdPingExtSize(); extensions++; @@ -2689,7 +2693,7 @@ void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) { } else if (type == CLUSTERMSG_EXT_TYPE_FORGOTTEN_NODE) { clusterMsgPingExtForgottenNode *forgotten_node_ext = &(ext->ext[0].forgotten_node); clusterNode *n = clusterLookupNode(forgotten_node_ext->name, CLUSTER_NAMELEN); - if (n && n != myself && !(nodeIsSlave(myself) && myself->slaveof == n)) { + if (n && n != myself && !(nodeIsReplica(myself) && myself->replicaof == n)) { sds id = sdsnewlen(forgotten_node_ext->name, CLUSTER_NAMELEN); dictEntry *de = dictAddOrFind(server.cluster->nodes_black_list, id); uint64_t expire = server.unixtime + ntohu64(forgotten_node_ext->ttl); @@ -2721,9 +2725,9 @@ void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) { * As the cluster progressively upgrades to version 7.2, we can expect the shard_ids * across all 
nodes to naturally converge and align. * - * If sender is a replica, set the shard_id to the shard_id of its master. + * If sender is a replica, set the shard_id to the shard_id of its primary. * Otherwise, we'll set it now. */ - if (ext_shardid == NULL) ext_shardid = clusterNodeGetMaster(sender)->shard_id; + if (ext_shardid == NULL) ext_shardid = clusterNodeGetPrimary(sender)->shard_id; updateShardId(sender, ext_shardid); } @@ -2767,7 +2771,7 @@ int clusterIsValidPacket(clusterLink *link) { return 0; } - if (type == server.cluster_drop_packet_filter) { + if (type == server.debug_cluster_drop_packet_filter) { serverLog(LL_WARNING, "Dropping packet that matches debug drop filter"); return 0; } @@ -2844,7 +2848,16 @@ int clusterIsValidPacket(clusterLink *link) { * received from the wrong sender ID). */ int clusterProcessPacket(clusterLink *link) { /* Validate that the packet is well-formed */ - if (!clusterIsValidPacket(link)) return 1; + if (!clusterIsValidPacket(link)) { + clusterMsg *hdr = (clusterMsg *)link->rcvbuf; + uint16_t type = ntohs(hdr->type); + if (server.debug_cluster_close_link_on_packet_drop && type == server.debug_cluster_drop_packet_filter) { + freeClusterLink(link); + serverLog(LL_WARNING, "Closing link for matching packet type %hu", type); + return 0; + } + return 1; + } clusterMsg *hdr = (clusterMsg *)link->rcvbuf; uint16_t type = ntohs(hdr->type); @@ -2870,7 +2883,7 @@ int clusterProcessPacket(clusterLink *link) { senderConfigEpoch = ntohu64(hdr->configEpoch); if (senderCurrentEpoch > server.cluster->currentEpoch) server.cluster->currentEpoch = senderCurrentEpoch; /* Update the sender configEpoch if it is a primary publishing a newer one. 
*/ - if (!memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof)) && + if (!memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof)) && senderConfigEpoch > sender->configEpoch) { sender->configEpoch = senderConfigEpoch; clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_FSYNC_CONFIG); @@ -2878,16 +2891,16 @@ int clusterProcessPacket(clusterLink *link) { /* Update the replication offset info for this node. */ sender->repl_offset = ntohu64(hdr->offset); sender->repl_offset_time = now; - /* If we are a slave performing a manual failover and our master + /* If we are a replica performing a manual failover and our primary * sent its offset while already paused, populate the MF state. */ - if (server.cluster->mf_end && nodeIsSlave(myself) && myself->slaveof == sender && - hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED && server.cluster->mf_master_offset == -1) { - server.cluster->mf_master_offset = sender->repl_offset; + if (server.cluster->mf_end && nodeIsReplica(myself) && myself->replicaof == sender && + hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED && server.cluster->mf_primary_offset == -1) { + server.cluster->mf_primary_offset = sender->repl_offset; clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_MANUALFAILOVER); serverLog(LL_NOTICE, "Received replication offset for paused " - "master manual failover: %lld", - server.cluster->mf_master_offset); + "primary manual failover: %lld", + server.cluster->mf_primary_offset); } } @@ -2916,7 +2929,7 @@ int clusterProcessPacket(clusterLink *link) { /* Add this node if it is new for us and the msg type is MEET. * In this stage we don't try to add the node with the right - * flags, slaveof pointer, and so forth, as this details will be + * flags, replicaof pointer, and so forth, as this details will be * resolved when we'll receive PONGs from the node. 
*/ if (!sender && type == CLUSTERMSG_TYPE_MEET) { clusterNode *node; @@ -2942,6 +2955,13 @@ int clusterProcessPacket(clusterLink *link) { if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG || type == CLUSTERMSG_TYPE_MEET) { serverLog(LL_DEBUG, "%s packet received: %.40s", clusterGetMessageTypeString(type), link->node ? link->node->name : "NULL"); + + if (sender && (sender->flags & CLUSTER_NODE_MEET)) { + /* Once we get a response for MEET from the sender, we can stop sending more MEET. */ + sender->flags &= ~CLUSTER_NODE_MEET; + serverLog(LL_NOTICE, "Successfully completed handshake with %.40s (%s)", sender->name, + sender->human_nodename); + } if (!link->inbound) { if (nodeInHandshake(link->node)) { /* If we already have this node, try to change the @@ -2965,13 +2985,13 @@ int clusterProcessPacket(clusterLink *link) { clusterRenameNode(link->node, hdr->sender); serverLog(LL_DEBUG, "Handshake with node %.40s completed.", link->node->name); link->node->flags &= ~CLUSTER_NODE_HANDSHAKE; - link->node->flags |= flags & (CLUSTER_NODE_MASTER | CLUSTER_NODE_SLAVE); + link->node->flags |= flags & (CLUSTER_NODE_PRIMARY | CLUSTER_NODE_REPLICA); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } else if (memcmp(link->node->name, hdr->sender, CLUSTER_NAMELEN) != 0) { /* If the reply has a non matching node ID we * disconnect this node and set it as not having an associated * address. */ - serverLog(LL_DEBUG, + serverLog(LL_NOTICE, "PONG contains mismatching sender ID. About node %.40s (%s) in shard %.40s added %d ms ago, " "having flags %d", link->node->name, link->node->human_nodename, link->node->shard_id, @@ -2990,8 +3010,8 @@ int clusterProcessPacket(clusterLink *link) { /* Copy the CLUSTER_NODE_NOFAILOVER flag from what the sender * announced. This is a dynamic flag that we receive from the * sender, and the latest status must be trusted. 
We need it to - * be propagated because the slave ranking used to understand the - * delay of each slave in the voting process, needs to know + * be propagated because the replica ranking used to understand the + * delay of each replica in the voting process, needs to know * what are the instances really competing. */ if (sender) { int nofailover = flags & CLUSTER_NODE_NOFAILOVER; @@ -3024,23 +3044,23 @@ int clusterProcessPacket(clusterLink *link) { } } - /* Check for role switch: slave -> master or master -> slave. */ + /* Check for role switch: replica -> primary or primary -> replica. */ if (sender) { serverLog(LL_DEBUG, "node %.40s (%s) announces that it is a %s in shard %.40s", sender->name, sender->human_nodename, - !memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof)) ? "master" : "slave", + !memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof)) ? "primary" : "replica", sender->shard_id); - if (!memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof))) { - /* Node is a master. */ - clusterSetNodeAsMaster(sender); + if (!memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof))) { + /* Node is a primary. */ + clusterSetNodeAsPrimary(sender); } else { - /* Node is a slave. */ - clusterNode *master = clusterLookupNode(hdr->slaveof, CLUSTER_NAMELEN); + /* Node is a replica. */ + clusterNode *primary = clusterLookupNode(hdr->replicaof, CLUSTER_NAMELEN); - if (clusterNodeIsMaster(sender)) { - /* Master turned into a slave! Reconfigure the node. */ - if (master && areInSameShard(master, sender)) { - /* `sender` was a primary and was in the same shard as `master`, its new primary */ + if (clusterNodeIsPrimary(sender)) { + /* Primary turned into a replica! Reconfigure the node. 
*/ + if (primary && areInSameShard(primary, sender)) { + /* `sender` was a primary and was in the same shard as its new primary */ if (sender->configEpoch > senderConfigEpoch) { serverLog(LL_NOTICE, "Ignore stale message from %.40s (%s) in shard %.40s;" @@ -3048,48 +3068,48 @@ int clusterProcessPacket(clusterLink *link) { sender->name, sender->human_nodename, sender->shard_id, (unsigned long long)senderConfigEpoch, (unsigned long long)sender->configEpoch); } else { - /* `master` is still a `slave` in this observer node's view; update its role and configEpoch - */ - clusterSetNodeAsMaster(master); - master->configEpoch = senderConfigEpoch; + /* `primary` is still a `replica` in this observer node's view; + * update its role and configEpoch */ + clusterSetNodeAsPrimary(primary); + primary->configEpoch = senderConfigEpoch; serverLog(LL_NOTICE, "A failover occurred in shard %.40s; node %.40s (%s)" " failed over to node %.40s (%s) with a config epoch of %llu", - sender->shard_id, sender->name, sender->human_nodename, master->name, - master->human_nodename, (unsigned long long)master->configEpoch); + sender->shard_id, sender->name, sender->human_nodename, primary->name, + primary->human_nodename, (unsigned long long)primary->configEpoch); } } else { /* `sender` was moved to another shard and has become a replica, remove its slot assignment */ int slots = clusterDelNodeSlots(sender); serverLog(LL_NOTICE, - "Node %.40s (%s) is no longer master of shard %.40s;" + "Node %.40s (%s) is no longer primary of shard %.40s;" " removed all %d slot(s) it used to own", sender->name, sender->human_nodename, sender->shard_id, slots); - if (master != NULL) { + if (primary != NULL) { serverLog(LL_NOTICE, "Node %.40s (%s) is now part of shard %.40s", sender->name, - sender->human_nodename, master->shard_id); + sender->human_nodename, primary->shard_id); } } - sender->flags &= ~(CLUSTER_NODE_MASTER | CLUSTER_NODE_MIGRATE_TO); - sender->flags |= CLUSTER_NODE_SLAVE; + sender->flags &= 
~(CLUSTER_NODE_PRIMARY | CLUSTER_NODE_MIGRATE_TO); + sender->flags |= CLUSTER_NODE_REPLICA; /* Update config and state. */ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE); } - /* Master node changed for this slave? */ - if (master && sender->slaveof != master) { - if (sender->slaveof) clusterNodeRemoveSlave(sender->slaveof, sender); + /* Primary node changed for this replica? */ + if (primary && sender->replicaof != primary) { + if (sender->replicaof) clusterNodeRemoveReplica(sender->replicaof, sender); serverLog(LL_NOTICE, "Node %.40s (%s) is now a replica of node %.40s (%s) in shard %.40s", - sender->name, sender->human_nodename, master->name, master->human_nodename, + sender->name, sender->human_nodename, primary->name, primary->human_nodename, sender->shard_id); - clusterNodeAddSlave(master, sender); - sender->slaveof = master; + clusterNodeAddReplica(primary, sender); + sender->replicaof = primary; /* Update the shard_id when a replica is connected to its * primary in the very first time. */ - updateShardId(sender, master->shard_id); + updateShardId(sender, primary->shard_id); /* Update config. */ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); @@ -3099,94 +3119,63 @@ int clusterProcessPacket(clusterLink *link) { /* Update our info about served slots. * - * Note: this MUST happen after we update the master/slave state - * so that CLUSTER_NODE_MASTER flag will be set. */ + * Note: this MUST happen after we update the primary/replica state + * so that CLUSTER_NODE_PRIMARY flag will be set. */ /* Many checks are only needed if the set of served slots this * instance claims is different compared to the set of slots we have * for it. Check this ASAP to avoid other computational expansive * checks later. */ - clusterNode *sender_master = NULL; /* Sender or its master if slave. */ - int dirty_slots = 0; /* Sender claimed slots don't match my view? */ + clusterNode *sender_primary = NULL; /* Sender or its primary if replica. 
*/ + int dirty_slots = 0; /* Sender claimed slots don't match my view? */ if (sender) { - sender_master = clusterNodeIsMaster(sender) ? sender : sender->slaveof; - if (sender_master) { - dirty_slots = memcmp(sender_master->slots, hdr->myslots, sizeof(hdr->myslots)) != 0; + sender_primary = clusterNodeIsPrimary(sender) ? sender : sender->replicaof; + if (sender_primary) { + dirty_slots = memcmp(sender_primary->slots, hdr->myslots, sizeof(hdr->myslots)) != 0; /* Force dirty when the sending shard owns no slots so that * we have a chance to examine and repair slot migrating/importing * states that involve empty shards. */ - dirty_slots |= sender_master->numslots == 0; + dirty_slots |= sender_primary->numslots == 0; } } - /* 1) If the sender of the message is a master, and we detected that + /* 1) If the sender of the message is a primary, and we detected that * the set of slots it claims changed, scan the slots to see if we * need to update our configuration. */ - if (sender_master && dirty_slots) clusterUpdateSlotsConfigWith(sender_master, senderConfigEpoch, hdr->myslots); + if (sender_primary && dirty_slots) + clusterUpdateSlotsConfigWith(sender_primary, senderConfigEpoch, hdr->myslots); /* Explicitly check for a replication loop before attempting the replication - * chain folding logic. - * - * In some rare case, slot config updates (via either PING/PONG or UPDATE) - * can be delivered out of order as illustrated below. - * - * 1. To keep the discussion simple, let's assume we have 2 shards, shard a - * and shard b. Let's also assume there are two slots in total with shard - * a owning slot 1 and shard b owning slot 2. - * 2. Shard a has two nodes: primary A and replica A*; shard b has primary - * B and replica B*. - * 3. A manual failover was initiated on A* and A* just wins the election. - * 4. A* announces to the world that it now owns slot 1 using PING messages. 
- * These PING messages are queued in the outgoing buffer to every other - * node in the cluster, namely, A, B, and B*. - * 5. Keep in mind that there is no ordering in the delivery of these PING - * messages. For the stale PING message to appear, we need the following - * events in the exact order as they are laid out. - * a. An old PING message before A* becomes the new primary is still queued - * in A*'s outgoing buffer to A. This later becomes the stale message, - * which says A* is a replica of A. It is followed by A*'s election - * winning announcement PING message. - * b. B or B* processes A's election winning announcement PING message - * and sets slots[1]=A*. - * c. A sends a PING message to B (or B*). Since A hasn't learnt that A* - * wins the election, it claims that it owns slot 1 but with a lower - * epoch than B has on slot 1. This leads to B sending an UPDATE to - * A directly saying A* is the new owner of slot 1 with a higher epoch. - * d. A receives the UPDATE from B and executes clusterUpdateSlotsConfigWith. - * A now realizes that it is a replica of A* hence setting myself->slaveof - * to A*. - * e. Finally, the pre-failover PING message queued up in A*'s outgoing - * buffer to A is delivered and processed, out of order though, to A. - * f. This stale PING message creates the replication loop */ - if (myself->slaveof && myself->slaveof->slaveof && myself->slaveof->slaveof != myself) { - /* Safeguard against sub-replicas. A replica's master can turn itself + * chain folding logic. */ + if (myself->replicaof && myself->replicaof->replicaof && myself->replicaof->replicaof != myself) { + /* Safeguard against sub-replicas. A replica's primary can turn itself * into a replica if its last slot is removed. If no other node takes * over the slot, there is nothing else to trigger replica migration. */ serverLog(LL_NOTICE, "I'm a sub-replica! 
Reconfiguring myself as a replica of %.40s from %.40s", - myself->slaveof->slaveof->name, myself->slaveof->name); - clusterSetMaster(myself->slaveof->slaveof, 1); + myself->replicaof->replicaof->name, myself->replicaof->name); + clusterSetPrimary(myself->replicaof->replicaof, 1); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } /* 2) We also check for the reverse condition, that is, the sender - * claims to serve slots we know are served by a master with a + * claims to serve slots we know are served by a primary with a * greater configEpoch. If this happens we inform the sender. * * This is useful because sometimes after a partition heals, a - * reappearing master may be the last one to claim a given set of + * reappearing primary may be the last one to claim a given set of * hash slots, but with a configuration that other instances know to * be deprecated. Example: * - * A and B are master and slave for slots 1,2,3. + * A and B are primary and replica for slots 1,2,3. * A is partitioned away, B gets promoted. * B is partitioned away, and A returns available. * * Usually B would PING A publishing its set of served slots and its * configEpoch, but because of the partition B can't inform A of the * new configuration, so other nodes that have an updated table must - * do it. In this way A will stop to act as a master (or can try to + * do it. In this way A will stop to act as a primary (or can try to * failover if there are the conditions to win the election). */ if (sender && dirty_slots) { int j; @@ -3212,7 +3201,7 @@ int clusterProcessPacket(clusterLink *link) { /* If our config epoch collides with the sender's try to fix * the problem. 
*/ - if (sender && clusterNodeIsMaster(myself) && clusterNodeIsMaster(sender) && + if (sender && clusterNodeIsPrimary(myself) && clusterNodeIsPrimary(sender) && senderConfigEpoch == myself->configEpoch) { clusterHandleConfigEpochCollision(sender); } @@ -3262,31 +3251,30 @@ int clusterProcessPacket(clusterLink *link) { clusterSendFailoverAuthIfNeeded(sender, hdr); } else if (type == CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK) { if (!sender) return 1; /* We don't know that node. */ - /* We consider this vote only if the sender is a master serving + /* We consider this vote only if the sender is a primary serving * a non zero number of slots, and its currentEpoch is greater or * equal to epoch where this node started the election. */ - if (clusterNodeIsMaster(sender) && sender->numslots > 0 && - senderCurrentEpoch >= server.cluster->failover_auth_epoch) { + if (clusterNodeIsVotingPrimary(sender) && senderCurrentEpoch >= server.cluster->failover_auth_epoch) { server.cluster->failover_auth_count++; /* Maybe we reached a quorum here, set a flag to make sure * we check ASAP. */ clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); } } else if (type == CLUSTERMSG_TYPE_MFSTART) { - /* This message is acceptable only if I'm a master and the sender - * is one of my slaves. */ - if (!sender || sender->slaveof != myself) return 1; - /* Manual failover requested from slaves. Initialize the state + /* This message is acceptable only if I'm a primary and the sender + * is one of my replicas. */ + if (!sender || sender->replicaof != myself) return 1; + /* Manual failover requested from replicas. Initialize the state * accordingly. 
*/ resetManualFailover(); server.cluster->mf_end = now + CLUSTER_MF_TIMEOUT; - server.cluster->mf_slave = sender; + server.cluster->mf_replica = sender; pauseActions(PAUSE_DURING_FAILOVER, now + (CLUSTER_MF_TIMEOUT * CLUSTER_MF_PAUSE_MULT), PAUSE_ACTIONS_CLIENT_WRITE_SET); serverLog(LL_NOTICE, "Manual failover requested by replica %.40s (%s).", sender->name, sender->human_nodename); /* We need to send a ping message to the replica, as it would carry - * `server.cluster->mf_master_offset`, which means the master paused clients - * at offset `server.cluster->mf_master_offset`, so that the replica would + * `server.cluster->mf_primary_offset`, which means the primary paused clients + * at offset `server.cluster->mf_primary_offset`, so that the replica would * know that it is safe to set its `server.cluster->mf_can_start` to 1 so as * to complete failover as quickly as possible. */ clusterSendPing(link, CLUSTERMSG_TYPE_PING); @@ -3299,8 +3287,8 @@ int clusterProcessPacket(clusterLink *link) { if (!n) return 1; /* We don't know the reported node. */ if (n->configEpoch >= reportedConfigEpoch) return 1; /* Nothing new. */ - /* If in our current config the node is a slave, set it as a master. */ - if (nodeIsSlave(n)) clusterSetNodeAsMaster(n); + /* If in our current config the node is a replica, set it as a primary. */ + if (nodeIsReplica(n)) clusterSetNodeAsPrimary(n); /* Update the node's configEpoch. */ n->configEpoch = reportedConfigEpoch; @@ -3407,12 +3395,17 @@ void clusterLinkConnectHandler(connection *conn) { * replaced by the clusterSendPing() call. */ node->ping_sent = old_ping_sent; } - /* We can clear the flag after the first packet is sent. - * If we'll never receive a PONG, we'll never send new packets - * to this node. Instead after the PONG is received and we - * are no longer in meet/handshake status, we want to send - * normal PING packets. */ - node->flags &= ~CLUSTER_NODE_MEET; + /* NOTE: Assume the current node is A and is asked to MEET another node B. 
+ * Once A sends MEET to B, it cannot clear the MEET flag for B until it + * gets a response from B. If the MEET packet is not accepted by B due to + * link failure, A must continue sending MEET. If A doesn't continue sending + * MEET, A will know about B, but B will never add A. Every node always + * responds to PINGs from unknown nodes with a PONG, so A will know about B + * and continue sending PINGs. But B won't add A until it sees a MEET (or it + * gets to know about A from a trusted third node C). In this case, clearing + * the MEET flag here leads to asymmetry in the cluster membership. So, we + * clear the MEET flag in clusterProcessPacket. + */ serverLog(LL_DEBUG, "Connecting with Node %.40s at %s:%d", node->name, node->ip, node->cport); } @@ -3550,13 +3543,13 @@ void clusterBroadcastMessage(clusterMsgSendBlock *msgblock) { * sizeof(clusterMsg) in bytes. */ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { uint64_t offset; - clusterNode *master; + clusterNode *primary; - /* If this node is a master, we send its slots bitmap and configEpoch. - * If this node is a slave we send the master's information instead (the - * node is flagged as slave so the receiver knows that it is NOT really + /* If this node is a primary, we send its slots bitmap and configEpoch. + * If this node is a replica we send the primary's information instead (the + * node is flagged as replica so the receiver knows that it is NOT really * in charge for this slots. */ - master = (nodeIsSlave(myself) && myself->slaveof) ? myself->slaveof : myself; + primary = (nodeIsReplica(myself) && myself->replicaof) ? 
myself->replicaof : myself; hdr->ver = htons(CLUSTER_PROTO_VER); hdr->sig[0] = 'R'; @@ -3578,9 +3571,9 @@ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { int announced_tcp_port, announced_tls_port, announced_cport; deriveAnnouncedPorts(&announced_tcp_port, &announced_tls_port, &announced_cport); - memcpy(hdr->myslots, master->slots, sizeof(hdr->myslots)); - memset(hdr->slaveof, 0, CLUSTER_NAMELEN); - if (myself->slaveof != NULL) memcpy(hdr->slaveof, myself->slaveof->name, CLUSTER_NAMELEN); + memcpy(hdr->myslots, primary->slots, sizeof(hdr->myslots)); + memset(hdr->replicaof, 0, CLUSTER_NAMELEN); + if (myself->replicaof != NULL) memcpy(hdr->replicaof, myself->replicaof->name, CLUSTER_NAMELEN); if (server.tls_cluster) { hdr->port = htons(announced_tls_port); hdr->pport = htons(announced_tcp_port); @@ -3594,17 +3587,17 @@ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { /* Set the currentEpoch and configEpochs. */ hdr->currentEpoch = htonu64(server.cluster->currentEpoch); - hdr->configEpoch = htonu64(master->configEpoch); + hdr->configEpoch = htonu64(primary->configEpoch); /* Set the replication offset. */ - if (nodeIsSlave(myself)) - offset = replicationGetSlaveOffset(); + if (nodeIsReplica(myself)) + offset = replicationGetReplicaOffset(); else - offset = server.master_repl_offset; + offset = server.primary_repl_offset; hdr->offset = htonu64(offset); /* Set the message flags. */ - if (clusterNodeIsMaster(myself) && server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_PAUSED; + if (clusterNodeIsPrimary(myself) && server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_PAUSED; hdr->totlen = htonl(msglen); } @@ -3647,7 +3640,7 @@ void clusterSendPing(clusterLink *link, int type) { /* How many gossip sections we want to add? 1/10 of the number of nodes * and anyway at least 3. Why 1/10? 
* - * If we have N masters, with N/10 entries, and we consider that in + * If we have N primaries, with N/10 entries, and we consider that in * node_timeout we exchange with each other node at least 4 packets * (we ping in the worst case in node_timeout/2 time, and we also * receive two pings from the host), we have a total of 8 packets @@ -3660,14 +3653,14 @@ void clusterSendPing(clusterLink *link, int type) { * PROB = probability of being featured in a single gossip entry, * which is 1 / NUM_OF_NODES. * ENTRIES = 10. - * TOTAL_PACKETS = 2 * 4 * NUM_OF_MASTERS. + * TOTAL_PACKETS = 2 * 4 * NUM_OF_PRIMARIES. * - * If we assume we have just masters (so num of nodes and num of masters + * If we assume we have just primaries (so num of nodes and num of primaries * is the same), with 1/10 we always get over the majority, and specifically - * 80% of the number of nodes, to account for many masters failing at the + * 80% of the number of nodes, to account for many primaries failing at the * same time. * - * Since we have non-voting slaves that lower the probability of an entry + * Since we have non-voting replicas that lower the probability of an entry * to feature our node, we set the number of entries per packet as * 10% of the total nodes we have. */ wanted = floor(dictSize(server.cluster->nodes) / 10); @@ -3776,16 +3769,16 @@ void clusterSendPing(clusterLink *link, int type) { * In Cluster mode, pongs are not used just for failure detection, but also * to carry important configuration information. So broadcasting a pong is * useful when something changes in the configuration and we want to make - * the cluster aware ASAP (for instance after a slave promotion). + * the cluster aware ASAP (for instance after a replica promotion). * * The 'target' argument specifies the receiving instances using the * defines below: * * CLUSTER_BROADCAST_ALL -> All known instances. - * CLUSTER_BROADCAST_LOCAL_SLAVES -> All slaves in my master-slaves ring. 
+ * CLUSTER_BROADCAST_LOCAL_REPLICAS -> All replicas in my primary-replicas ring. */ #define CLUSTER_BROADCAST_ALL 0 -#define CLUSTER_BROADCAST_LOCAL_SLAVES 1 +#define CLUSTER_BROADCAST_LOCAL_REPLICAS 1 void clusterBroadcastPong(int target) { dictIterator *di; dictEntry *de; @@ -3796,10 +3789,10 @@ void clusterBroadcastPong(int target) { if (!node->link) continue; if (node == myself || nodeInHandshake(node)) continue; - if (target == CLUSTER_BROADCAST_LOCAL_SLAVES) { - int local_slave = - nodeIsSlave(node) && node->slaveof && (node->slaveof == myself || node->slaveof == myself->slaveof); - if (!local_slave) continue; + if (target == CLUSTER_BROADCAST_LOCAL_REPLICAS) { + int local_replica = nodeIsReplica(node) && node->replicaof && + (node->replicaof == myself || node->replicaof == myself->replicaof); + if (!local_replica) continue; } clusterSendPing(node->link, CLUSTERMSG_TYPE_PONG); } @@ -3955,15 +3948,15 @@ void clusterPropagatePublish(robj *channel, robj *message, int sharded) { } /* ----------------------------------------------------------------------------- - * SLAVE node specific functions + * REPLICA node specific functions * -------------------------------------------------------------------------- */ /* This function sends a FAILOVER_AUTH_REQUEST message to every node in order to - * see if there is the quorum for this slave instance to failover its failing - * master. + * see if there is the quorum for this replica instance to failover its failing + * primary. * - * Note that we send the failover request to everybody, master and slave nodes, - * but only the masters are supposed to reply to our query. */ + * Note that we send the failover request to everybody, primary and replica nodes, + * but only the primaries are supposed to reply to our query. 
*/ void clusterRequestFailoverAuth(void) { uint32_t msglen = sizeof(clusterMsg) - sizeof(union clusterMsgData); clusterMsgSendBlock *msgblock = createClusterMsgSendBlock(CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST, msglen); @@ -3971,7 +3964,7 @@ void clusterRequestFailoverAuth(void) { clusterMsg *hdr = &msgblock->msg; /* If this is a manual failover, set the CLUSTERMSG_FLAG0_FORCEACK bit * in the header to communicate the nodes receiving the message that - * they should authorized the failover even if the master is working. */ + * they should authorized the failover even if the primary is working. */ if (server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_FORCEACK; clusterBroadcastMessage(msgblock); clusterMsgSendBlockDecrRefCount(msgblock); @@ -4001,18 +3994,18 @@ void clusterSendMFStart(clusterNode *node) { /* Vote for the node asking for our vote if there are the conditions. */ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { - clusterNode *master = node->slaveof; + clusterNode *primary = node->replicaof; uint64_t requestCurrentEpoch = ntohu64(request->currentEpoch); uint64_t requestConfigEpoch = ntohu64(request->configEpoch); unsigned char *claimed_slots = request->myslots; int force_ack = request->mflags[0] & CLUSTERMSG_FLAG0_FORCEACK; int j; - /* IF we are not a master serving at least 1 slot, we don't have the + /* IF we are not a primary serving at least 1 slot, we don't have the * right to vote, as the cluster size is the number - * of masters serving at least one slot, and quorum is the cluster + * of primariies serving at least one slot, and quorum is the cluster * size + 1 */ - if (nodeIsSlave(myself) || myself->numslots == 0) return; + if (nodeIsReplica(myself) || myself->numslots == 0) return; /* Request epoch must be >= our currentEpoch. 
* Note that it is impossible for it to actually be greater since @@ -4032,37 +4025,37 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { return; } - /* Node must be a slave and its master down. - * The master can be non failing if the request is flagged + /* Node must be a replica and its primary down. + * The primary can be non failing if the request is flagged * with CLUSTERMSG_FLAG0_FORCEACK (manual failover). */ - if (clusterNodeIsMaster(node) || master == NULL || (!nodeFailed(master) && !force_ack)) { - if (clusterNodeIsMaster(node)) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): it is a master node", node->name, + if (clusterNodeIsPrimary(node) || primary == NULL || (!nodeFailed(primary) && !force_ack)) { + if (clusterNodeIsPrimary(node)) { + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): it is a primary node", node->name, node->human_nodename); - } else if (master == NULL) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): I don't know its master", node->name, + } else if (primary == NULL) { + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): I don't know its primary", node->name, node->human_nodename); - } else if (!nodeFailed(master)) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): its master is up", node->name, + } else if (!nodeFailed(primary)) { + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): its primary is up", node->name, node->human_nodename); } return; } - /* We did not voted for a slave about this master for two + /* We did not voted for a replica about this primary for two * times the node timeout. This is not strictly needed for correctness * of the algorithm but makes the base case more linear. 
*/ - if (mstime() - node->slaveof->voted_time < server.cluster_node_timeout * 2) { + if (mstime() - node->replicaof->voted_time < server.cluster_node_timeout * 2) { serverLog(LL_WARNING, "Failover auth denied to %.40s %s: " - "can't vote about this master before %lld milliseconds", + "can't vote about this primary before %lld milliseconds", node->name, node->human_nodename, - (long long)((server.cluster_node_timeout * 2) - (mstime() - node->slaveof->voted_time))); + (long long)((server.cluster_node_timeout * 2) - (mstime() - node->replicaof->voted_time))); return; } - /* The slave requesting the vote must have a configEpoch for the claimed - * slots that is >= the one of the masters currently serving the same + /* The replica requesting the vote must have a configEpoch for the claimed + * slots that is >= the one of the primaries currently serving the same * slots in the current configuration. */ for (j = 0; j < CLUSTER_SLOTS; j++) { if (bitmapTestBit(claimed_slots, j) == 0) continue; @@ -4070,8 +4063,8 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { continue; } /* If we reached this point we found a slot that in our current slots - * is served by a master with a greater configEpoch than the one claimed - * by the slave requesting our vote. Refuse to vote for this slave. */ + * is served by a primary with a greater configEpoch than the one claimed + * by the replica requesting our vote. Refuse to vote for this replica. */ serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): " "slot %d epoch (%llu) > reqEpoch (%llu)", @@ -4080,46 +4073,46 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { return; } - /* We can vote for this slave. */ + /* We can vote for this replica. 
*/ server.cluster->lastVoteEpoch = server.cluster->currentEpoch; - node->slaveof->voted_time = mstime(); + node->replicaof->voted_time = mstime(); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_FSYNC_CONFIG); clusterSendFailoverAuth(node); serverLog(LL_NOTICE, "Failover auth granted to %.40s (%s) for epoch %llu", node->name, node->human_nodename, (unsigned long long)server.cluster->currentEpoch); } -/* This function returns the "rank" of this instance, a slave, in the context - * of its master-slaves ring. The rank of the slave is given by the number of - * other slaves for the same master that have a better replication offset +/* This function returns the "rank" of this instance, a replica, in the context + * of its primar-replicas ring. The rank of the replica is given by the number of + * other replicas for the same primary that have a better replication offset * compared to the local one (better means, greater, so they claim more data). * - * A slave with rank 0 is the one with the greatest (most up to date) + * A replica with rank 0 is the one with the greatest (most up to date) * replication offset, and so forth. Note that because how the rank is computed - * multiple slaves may have the same rank, in case they have the same offset. + * multiple replicas may have the same rank, in case they have the same offset. * - * The slave rank is used to add a delay to start an election in order to - * get voted and replace a failing master. Slaves with better replication + * The replica rank is used to add a delay to start an election in order to + * get voted and replace a failing primary. Replicas with better replication * offsets are more likely to win. */ -int clusterGetSlaveRank(void) { +int clusterGetReplicaRank(void) { long long myoffset; int j, rank = 0; - clusterNode *master; + clusterNode *primary; - serverAssert(nodeIsSlave(myself)); - master = myself->slaveof; - if (master == NULL) return 0; /* Never called by slaves without master. 
*/ + serverAssert(nodeIsReplica(myself)); + primary = myself->replicaof; + if (primary == NULL) return 0; /* Never called by replicas without primary. */ - myoffset = replicationGetSlaveOffset(); - for (j = 0; j < master->numslaves; j++) - if (master->slaves[j] != myself && !nodeCantFailover(master->slaves[j]) && - master->slaves[j]->repl_offset > myoffset) + myoffset = replicationGetReplicaOffset(); + for (j = 0; j < primary->num_replicas; j++) + if (primary->replicas[j] != myself && !nodeCantFailover(primary->replicas[j]) && + primary->replicas[j]->repl_offset > myoffset) rank++; return rank; } -/* This function is called by clusterHandleSlaveFailover() in order to - * let the slave log why it is not able to failover. Sometimes there are +/* This function is called by clusterHandleReplicaFailover() in order to + * let the replica log why it is not able to failover. Sometimes there are * not the conditions, but since the failover function is called again and * again, we can't log the same things continuously. * @@ -4128,18 +4121,18 @@ int clusterGetSlaveRank(void) { * * 1) The reason for which the failover can't be initiated changed. * The reasons also include a NONE reason we reset the state to - * when the slave finds that its master is fine (no FAIL flag). - * 2) Also, the log is emitted again if the master is still down and + * when the replica finds that its primary is fine (no FAIL flag). + * 2) Also, the log is emitted again if the primary is still down and * the reason for not failing over is still the same, but more than * CLUSTER_CANT_FAILOVER_RELOG_PERIOD seconds elapsed. - * 3) Finally, the function only logs if the slave is down for more than + * 3) Finally, the function only logs if the replica is down for more than * five seconds + NODE_TIMEOUT. This way nothing is logged when a * failover starts in a reasonable time. 
* - * The function is called with the reason why the slave can't failover + * The function is called with the reason why the replica can't failover * which is one of the integer macros CLUSTER_CANT_FAILOVER_*. * - * The function is guaranteed to be called only if 'myself' is a slave. */ + * The function is guaranteed to be called only if 'myself' is a replica. */ void clusterLogCantFailover(int reason) { char *msg; static time_t lastlog_time = 0; @@ -4152,15 +4145,16 @@ void clusterLogCantFailover(int reason) { server.cluster->cant_failover_reason = reason; - /* We also don't emit any log if the master failed no long ago, the - * goal of this function is to log slaves in a stalled condition for + /* We also don't emit any log if the primary failed no long ago, the + * goal of this function is to log replicas in a stalled condition for * a long time. */ - if (myself->slaveof && nodeFailed(myself->slaveof) && (mstime() - myself->slaveof->fail_time) < nolog_fail_time) + if (myself->replicaof && nodeFailed(myself->replicaof) && + (mstime() - myself->replicaof->fail_time) < nolog_fail_time) return; switch (reason) { case CLUSTER_CANT_FAILOVER_DATA_AGE: - msg = "Disconnected from master for longer than allowed. " + msg = "Disconnected from primary for longer than allowed. " "Please check the 'cluster-replica-validity-factor' configuration " "option."; break; @@ -4181,24 +4175,24 @@ void clusterLogCantFailover(int reason) { } /* This function implements the final part of automatic and manual failovers, - * where the slave grabs its master's hash slots, and propagates the new + * where the replica grabs its primary's hash slots, and propagates the new * configuration. * * Note that it's up to the caller to be sure that the node got a new * configuration epoch already. 
*/ -void clusterFailoverReplaceYourMaster(void) { +void clusterFailoverReplaceYourPrimary(void) { int j; - clusterNode *oldmaster = myself->slaveof; + clusterNode *old_primary = myself->replicaof; - if (clusterNodeIsMaster(myself) || oldmaster == NULL) return; + if (clusterNodeIsPrimary(myself) || old_primary == NULL) return; - /* 1) Turn this node into a master. */ - clusterSetNodeAsMaster(myself); - replicationUnsetMaster(); + /* 1) Turn this node into a primary . */ + clusterSetNodeAsPrimary(myself); + replicationUnsetPrimary(); - /* 2) Claim all the slots assigned to our master. */ + /* 2) Claim all the slots assigned to our primary. */ for (j = 0; j < CLUSTER_SLOTS; j++) { - if (clusterNodeCoversSlot(oldmaster, j)) { + if (clusterNodeCoversSlot(old_primary, j)) { clusterDelSlot(j); clusterAddSlot(myself, j); } @@ -4209,22 +4203,22 @@ void clusterFailoverReplaceYourMaster(void) { clusterSaveConfigOrDie(1); /* 4) Pong all the other nodes so that they can update the state - * accordingly and detect that we switched to master role. */ + * accordingly and detect that we switched to primary role. */ clusterBroadcastPong(CLUSTER_BROADCAST_ALL); /* 5) If there was a manual failover in progress, clear the state. */ resetManualFailover(); } -/* This function is called if we are a slave node and our master serving +/* This function is called if we are a replica node and our primary serving * a non-zero amount of hash slots is in FAIL state. * * The goal of this function is: * 1) To check if we are able to perform a failover, is our data updated? - * 2) Try to get elected by masters. + * 2) Try to get elected by primaries. * 3) Perform the failover informing all the other nodes. 
*/ -void clusterHandleSlaveFailover(void) { +void clusterHandleReplicaFailover(void) { mstime_t data_age; mstime_t auth_age = mstime() - server.cluster->failover_auth_time; int needed_quorum = (server.cluster->size / 2) + 1; @@ -4246,12 +4240,13 @@ void clusterHandleSlaveFailover(void) { /* Pre conditions to run the function, that must be met both in case * of an automatic or manual failover: - * 1) We are a slave. - * 2) Our master is flagged as FAIL, or this is a manual failover. + * 1) We are a replica. + * 2) Our primary is flagged as FAIL, or this is a manual failover. * 3) We don't have the no failover configuration set, and this is * not a manual failover. */ - if (clusterNodeIsMaster(myself) || myself->slaveof == NULL || (!nodeFailed(myself->slaveof) && !manual_failover) || - (server.cluster_slave_no_failover && !manual_failover)) { + if (clusterNodeIsPrimary(myself) || myself->replicaof == NULL || + (!nodeFailed(myself->replicaof) && !manual_failover) || + (server.cluster_replica_no_failover && !manual_failover)) { /* There are no reasons to failover, so we set the reason why we * are returning without failing over to NONE. */ server.cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE; @@ -4259,25 +4254,25 @@ void clusterHandleSlaveFailover(void) { } /* Set data_age to the number of milliseconds we are disconnected from - * the master. */ + * the primary. */ if (server.repl_state == REPL_STATE_CONNECTED) { - data_age = (mstime_t)(server.unixtime - server.master->lastinteraction) * 1000; + data_age = (mstime_t)(server.unixtime - server.primary->last_interaction) * 1000; } else { data_age = (mstime_t)(server.unixtime - server.repl_down_since) * 1000; } /* Remove the node timeout from the data age as it is fine that we are - * disconnected from our master at least for the time it was down to be + * disconnected from our primary at least for the time it was down to be * flagged as FAIL, that's the baseline. 
*/ if (data_age > server.cluster_node_timeout) data_age -= server.cluster_node_timeout; - /* Check if our data is recent enough according to the slave validity + /* Check if our data is recent enough according to the replica validity * factor configured by the user. * * Check bypassed for manual failovers. */ - if (server.cluster_slave_validity_factor && - data_age > (((mstime_t)server.repl_ping_slave_period * 1000) + - (server.cluster_node_timeout * server.cluster_slave_validity_factor))) { + if (server.cluster_replica_validity_factor && + data_age > (((mstime_t)server.repl_ping_replica_period * 1000) + + (server.cluster_node_timeout * server.cluster_replica_validity_factor))) { if (!manual_failover) { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_DATA_AGE); return; @@ -4292,9 +4287,9 @@ void clusterHandleSlaveFailover(void) { random() % 500; /* Random delay between 0 and 500 milliseconds. */ server.cluster->failover_auth_count = 0; server.cluster->failover_auth_sent = 0; - server.cluster->failover_auth_rank = clusterGetSlaveRank(); - /* We add another delay that is proportional to the slave rank. - * Specifically 1 second * rank. This way slaves that have a probably + server.cluster->failover_auth_rank = clusterGetReplicaRank(); + /* We add another delay that is proportional to the replica rank. + * Specifically 1 second * rank. This way replicas that have a probably * less updated replication offset, are penalized. */ server.cluster->failover_auth_time += server.cluster->failover_auth_rank * 1000; /* However if this is a manual failover, no delay is needed. 
*/ @@ -4307,21 +4302,21 @@ void clusterHandleSlaveFailover(void) { "Start of election delayed for %lld milliseconds " "(rank #%d, offset %lld).", server.cluster->failover_auth_time - mstime(), server.cluster->failover_auth_rank, - replicationGetSlaveOffset()); + replicationGetReplicaOffset()); /* Now that we have a scheduled election, broadcast our offset - * to all the other slaves so that they'll updated their offsets + * to all the other replicas so that they'll updated their offsets * if our offset is better. */ - clusterBroadcastPong(CLUSTER_BROADCAST_LOCAL_SLAVES); + clusterBroadcastPong(CLUSTER_BROADCAST_LOCAL_REPLICAS); return; } /* It is possible that we received more updated offsets from other - * slaves for the same master since we computed our election delay. + * replicas for the same primary since we computed our election delay. * Update the delay if our rank changed. * * Not performed if this is a manual failover. */ if (server.cluster->failover_auth_sent == 0 && server.cluster->mf_end == 0) { - int newrank = clusterGetSlaveRank(); + int newrank = clusterGetReplicaRank(); if (newrank > server.cluster->failover_auth_rank) { long long added_delay = (newrank - server.cluster->failover_auth_rank) * 1000; server.cluster->failover_auth_time += added_delay; @@ -4357,9 +4352,9 @@ void clusterHandleSlaveFailover(void) { /* Check if we reached the quorum. */ if (server.cluster->failover_auth_count >= needed_quorum) { - /* We have the quorum, we can finally failover the master. */ + /* We have the quorum, we can finally failover the primary. */ - serverLog(LL_NOTICE, "Failover election won: I'm the new master."); + serverLog(LL_NOTICE, "Failover election won: I'm the new primary."); /* Update my configEpoch to the epoch of the election. */ if (myself->configEpoch < server.cluster->failover_auth_epoch) { @@ -4369,99 +4364,99 @@ void clusterHandleSlaveFailover(void) { } /* Take responsibility for the cluster slots. 
*/ - clusterFailoverReplaceYourMaster(); + clusterFailoverReplaceYourPrimary(); } else { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_VOTES); } } /* ----------------------------------------------------------------------------- - * CLUSTER slave migration + * CLUSTER replica migration * - * Slave migration is the process that allows a slave of a master that is - * already covered by at least another slave, to "migrate" to a master that - * is orphaned, that is, left with no working slaves. + * Replica migration is the process that allows a replica of a primary that is + * already covered by at least another replica, to "migrate" to a primary that + * is orphaned, that is, left with no working replicas. * ------------------------------------------------------------------------- */ /* This function is responsible to decide if this replica should be migrated - * to a different (orphaned) master. It is called by the clusterCron() function + * to a different (orphaned) primary. It is called by the clusterCron() function * only if: * - * 1) We are a slave node. - * 2) It was detected that there is at least one orphaned master in + * 1) We are a replica node. + * 2) It was detected that there is at least one orphaned primary in * the cluster. - * 3) We are a slave of one of the masters with the greatest number of - * slaves. + * 3) We are a replica of one of the primaries with the greatest number of + * replicas. * * This checks are performed by the caller since it requires to iterate - * the nodes anyway, so we spend time into clusterHandleSlaveMigration() + * the nodes anyway, so we spend time into clusterHandleReplicaMigration() * if definitely needed. * - * The function is called with a pre-computed max_slaves, that is the max - * number of working (not in FAIL state) slaves for a single master. + * The function is called with a pre-computed max_replicas, that is the max + * number of working (not in FAIL state) replicas for a single primary. 
* * Additional conditions for migration are examined inside the function. */ -void clusterHandleSlaveMigration(int max_slaves) { - int j, okslaves = 0; - clusterNode *mymaster = myself->slaveof, *target = NULL, *candidate = NULL; +void clusterHandleReplicaMigration(int max_replicas) { + int j, ok_replicas = 0; + clusterNode *my_primary = myself->replicaof, *target = NULL, *candidate = NULL; dictIterator *di; dictEntry *de; /* Step 1: Don't migrate if the cluster state is not ok. */ if (server.cluster->state != CLUSTER_OK) return; - /* Step 2: Don't migrate if my master will not be left with at least - * 'migration-barrier' slaves after my migration. */ - if (mymaster == NULL) return; - for (j = 0; j < mymaster->numslaves; j++) - if (!nodeFailed(mymaster->slaves[j]) && !nodeTimedOut(mymaster->slaves[j])) okslaves++; - if (okslaves <= server.cluster_migration_barrier) return; + /* Step 2: Don't migrate if my primary will not be left with at least + * 'migration-barrier' replicas after my migration. */ + if (my_primary == NULL) return; + for (j = 0; j < my_primary->num_replicas; j++) + if (!nodeFailed(my_primary->replicas[j]) && !nodeTimedOut(my_primary->replicas[j])) ok_replicas++; + if (ok_replicas <= server.cluster_migration_barrier) return; /* Step 3: Identify a candidate for migration, and check if among the - * masters with the greatest number of ok slaves, I'm the one with the - * smallest node ID (the "candidate slave"). + * primaries with the greatest number of ok replicas, I'm the one with the + * smallest node ID (the "candidate replica"). * * Note: this means that eventually a replica migration will occur - * since slaves that are reachable again always have their FAIL flag + * since replicas that are reachable again always have their FAIL flag * cleared, so eventually there must be a candidate. 
* There is a possible race condition causing multiple - * slaves to migrate at the same time, but this is unlikely to + * replicas to migrate at the same time, but this is unlikely to * happen and relatively harmless when it does. */ candidate = myself; di = dictGetSafeIterator(server.cluster->nodes); while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); - int okslaves = 0, is_orphaned = 1; + int ok_replicas = 0, is_orphaned = 1; - /* We want to migrate only if this master is working, orphaned, and - * used to have slaves or if failed over a master that had slaves + /* We want to migrate only if this primary is working, orphaned, and + * used to have replicas or if failed over a primary that had replicas * (MIGRATE_TO flag). This way we only migrate to instances that were * supposed to have replicas. */ - if (nodeIsSlave(node) || nodeFailed(node)) is_orphaned = 0; + if (nodeIsReplica(node) || nodeFailed(node)) is_orphaned = 0; if (!(node->flags & CLUSTER_NODE_MIGRATE_TO)) is_orphaned = 0; - /* Check number of working slaves. */ - if (clusterNodeIsMaster(node)) okslaves = clusterCountNonFailingSlaves(node); - if (okslaves > 0) is_orphaned = 0; + /* Check number of working replicas. */ + if (clusterNodeIsPrimary(node)) ok_replicas = clusterCountNonFailingReplicas(node); + if (ok_replicas > 0) is_orphaned = 0; if (is_orphaned) { if (!target && node->numslots > 0) target = node; /* Track the starting time of the orphaned condition for this - * master. */ + * primary. */ if (!node->orphaned_time) node->orphaned_time = mstime(); } else { node->orphaned_time = 0; } - /* Check if I'm the slave candidate for the migration: attached - * to a master with the maximum number of slaves and with the smallest + /* Check if I'm the replica candidate for the migration: attached + * to a primary with the maximum number of replicas and with the smallest * node ID. 
*/ - if (okslaves == max_slaves) { - for (j = 0; j < node->numslaves; j++) { - if (memcmp(node->slaves[j]->name, candidate->name, CLUSTER_NAMELEN) < 0) { - candidate = node->slaves[j]; + if (ok_replicas == max_replicas) { + for (j = 0; j < node->num_replicas; j++) { + if (memcmp(node->replicas[j]->name, candidate->name, CLUSTER_NAMELEN) < 0) { + candidate = node->replicas[j]; } } } @@ -4469,62 +4464,62 @@ void clusterHandleSlaveMigration(int max_slaves) { dictReleaseIterator(di); /* Step 4: perform the migration if there is a target, and if I'm the - * candidate, but only if the master is continuously orphaned for a + * candidate, but only if the primary is continuously orphaned for a * couple of seconds, so that during failovers, we give some time to - * the natural slaves of this instance to advertise their switch from - * the old master to the new one. */ - if (target && candidate == myself && (mstime() - target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY && + * the natural replicas of this instance to advertise their switch from + * the old primary to the new one. */ + if (target && candidate == myself && (mstime() - target->orphaned_time) > CLUSTER_REPLICA_MIGRATION_DELAY && !(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) { - serverLog(LL_NOTICE, "Migrating to orphaned master %.40s (%s) in shard %.40s", target->name, + serverLog(LL_NOTICE, "Migrating to orphaned primary %.40s (%s) in shard %.40s", target->name, target->human_nodename, target->shard_id); - clusterSetMaster(target, 1); + clusterSetPrimary(target, 1); } } /* ----------------------------------------------------------------------------- * CLUSTER manual failover * - * This are the important steps performed by slaves during a manual failover: + * This are the important steps performed by replicas during a manual failover: * 1) User send CLUSTER FAILOVER command. The failover state is initialized * setting mf_end to the millisecond unix time at which we'll abort the * attempt. 
- * 2) Slave sends a MFSTART message to the master requesting to pause clients + * 2) Replica sends a MFSTART message to the primary requesting to pause clients * for two times the manual failover timeout CLUSTER_MF_TIMEOUT. - * When master is paused for manual failover, it also starts to flag + * When primary is paused for manual failover, it also starts to flag * packets with CLUSTERMSG_FLAG0_PAUSED. - * 3) Slave waits for master to send its replication offset flagged as PAUSED. - * 4) If slave received the offset from the master, and its offset matches, - * mf_can_start is set to 1, and clusterHandleSlaveFailover() will perform + * 3) Replica waits for primary to send its replication offset flagged as PAUSED. + * 4) If replica received the offset from the primary, and its offset matches, + * mf_can_start is set to 1, and clusterHandleReplicaFailover() will perform * the failover as usually, with the difference that the vote request - * will be modified to force masters to vote for a slave that has a - * working master. + * will be modified to force primaries to vote for a replica that has a + * working primary. * - * From the point of view of the master things are simpler: when a - * PAUSE_CLIENTS packet is received the master sets mf_end as well and - * the sender in mf_slave. During the time limit for the manual failover - * the master will just send PINGs more often to this slave, flagged with - * the PAUSED flag, so that the slave will set mf_master_offset when receiving - * a packet from the master with this flag set. + * From the point of view of the primary things are simpler: when a + * PAUSE_CLIENTS packet is received the primary sets mf_end as well and + * the sender in mf_replica. During the time limit for the manual failover + * the primary will just send PINGs more often to this replica, flagged with + * the PAUSED flag, so that the replica will set mf_primary_offset when receiving + * a packet from the primary with this flag set. 
* * The goal of the manual failover is to perform a fast failover without - * data loss due to the asynchronous master-slave replication. + * data loss due to the asynchronous primary-replica replication. * -------------------------------------------------------------------------- */ -/* Reset the manual failover state. This works for both masters and slaves +/* Reset the manual failover state. This works for both primaries and replicas * as all the state about manual failover is cleared. * * The function can be used both to initialize the manual failover state at * startup or to abort a manual failover in progress. */ void resetManualFailover(void) { - if (server.cluster->mf_slave) { - /* We were a master failing over, so we paused clients and related actions. + if (server.cluster->mf_replica) { + /* We were a primary failing over, so we paused clients and related actions. * Regardless of the outcome we unpause now to allow traffic again. */ unpauseActions(PAUSE_DURING_FAILOVER); } server.cluster->mf_end = 0; /* No manual failover in progress. */ server.cluster->mf_can_start = 0; - server.cluster->mf_slave = NULL; - server.cluster->mf_master_offset = -1; + server.cluster->mf_replica = NULL; + server.cluster->mf_primary_offset = -1; } /* If a manual failover timed out, abort it. */ @@ -4542,16 +4537,16 @@ void clusterHandleManualFailover(void) { if (server.cluster->mf_end == 0) return; /* If mf_can_start is non-zero, the failover was already triggered so the - * next steps are performed by clusterHandleSlaveFailover(). */ + * next steps are performed by clusterHandleReplicaFailover(). */ if (server.cluster->mf_can_start) return; - if (server.cluster->mf_master_offset == -1) return; /* Wait for offset... */ + if (server.cluster->mf_primary_offset == -1) return; /* Wait for offset... 
*/ - if (server.cluster->mf_master_offset == replicationGetSlaveOffset()) { - /* Our replication offset matches the master replication offset + if (server.cluster->mf_primary_offset == replicationGetReplicaOffset()) { + /* Our replication offset matches the primary replication offset * announced after clients were paused. We can start the failover. */ server.cluster->mf_can_start = 1; - serverLog(LL_NOTICE, "All master replication stream processed, " + serverLog(LL_NOTICE, "All primary replication stream processed, " "manual failover can start."); clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); return; @@ -4631,9 +4626,9 @@ void clusterCron(void) { dictIterator *di; dictEntry *de; int update_state = 0; - int orphaned_masters; /* How many masters there are without ok slaves. */ - int max_slaves; /* Max number of ok slaves for a single master. */ - int this_slaves; /* Number of ok slaves for our master (if we are slave). */ + int orphaned_primaries; /* How many primaries there are without ok replicas. */ + int max_replicas; /* Max number of ok replicas for a single primary. */ + int this_replicas; /* Number of ok replicas for our primary (if we are replica). */ mstime_t min_pong = 0, now = mstime(); clusterNode *min_pong_node = NULL; static unsigned long long iteration = 0; @@ -4693,13 +4688,13 @@ void clusterCron(void) { /* Iterate nodes to check if we need to flag something as failing. * This loop is also responsible to: - * 1) Check if there are orphaned masters (masters without non failing - * slaves). - * 2) Count the max number of non failing slaves for a single master. - * 3) Count the number of slaves for our master, if we are a slave. */ - orphaned_masters = 0; - max_slaves = 0; - this_slaves = 0; + * 1) Check if there are orphaned primaries (primaries without non failing + * replicas). + * 2) Count the max number of non failing replicas for a single primary. + * 3) Count the number of replicas for our primary, if we are a replica. 
*/ + orphaned_primaries = 0; + max_replicas = 0; + this_replicas = 0; di = dictGetSafeIterator(server.cluster->nodes); while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); @@ -4707,19 +4702,19 @@ void clusterCron(void) { if (node->flags & (CLUSTER_NODE_MYSELF | CLUSTER_NODE_NOADDR | CLUSTER_NODE_HANDSHAKE)) continue; - /* Orphaned master check, useful only if the current instance - * is a slave that may migrate to another master. */ - if (nodeIsSlave(myself) && clusterNodeIsMaster(node) && !nodeFailed(node)) { - int okslaves = clusterCountNonFailingSlaves(node); + /* Orphaned primary check, useful only if the current instance + * is a replica that may migrate to another primary. */ + if (nodeIsReplica(myself) && clusterNodeIsPrimary(node) && !nodeFailed(node)) { + int ok_replicas = clusterCountNonFailingReplicas(node); - /* A master is orphaned if it is serving a non-zero number of - * slots, have no working slaves, but used to have at least one - * slave, or failed over a master that used to have slaves. */ - if (okslaves == 0 && node->numslots > 0 && node->flags & CLUSTER_NODE_MIGRATE_TO) { - orphaned_masters++; + /* A primary is orphaned if it is serving a non-zero number of + * slots, have no working replicas, but used to have at least one + * replica, or failed over a primary that used to have replicas. */ + if (ok_replicas == 0 && node->numslots > 0 && node->flags & CLUSTER_NODE_MIGRATE_TO) { + orphaned_primaries++; } - if (okslaves > max_slaves) max_slaves = okslaves; - if (myself->slaveof == node) this_slaves = okslaves; + if (ok_replicas > max_replicas) max_replicas = ok_replicas; + if (myself->replicaof == node) this_replicas = ok_replicas; } /* If we are not receiving any data for more than half the cluster @@ -4749,9 +4744,10 @@ void clusterCron(void) { continue; } - /* If we are a master and one of the slaves requested a manual + /* If we are a primary and one of the replicas requested a manual * failover, ping it continuously. 
*/ - if (server.cluster->mf_end && clusterNodeIsMaster(myself) && server.cluster->mf_slave == node && node->link) { + if (server.cluster->mf_end && clusterNodeIsPrimary(myself) && server.cluster->mf_replica == node && + node->link) { clusterSendPing(node->link, CLUSTERMSG_TYPE_PING); continue; } @@ -4775,36 +4771,37 @@ void clusterCron(void) { if (!(node->flags & (CLUSTER_NODE_PFAIL | CLUSTER_NODE_FAIL))) { node->flags |= CLUSTER_NODE_PFAIL; update_state = 1; - if (clusterNodeIsMaster(myself) && server.cluster->size == 1) { + if (server.cluster->size == 1 && clusterNodeIsVotingPrimary(myself)) { markNodeAsFailingIfNeeded(node); } else { - serverLog(LL_DEBUG, "*** NODE %.40s possibly failing", node->name); + serverLog(LL_NOTICE, "NODE %.40s (%s) possibly failing.", node->name, node->human_nodename); } } } } dictReleaseIterator(di); - /* If we are a slave node but the replication is still turned off, - * enable it if we know the address of our master and it appears to + /* If we are a replica node but the replication is still turned off, + * enable it if we know the address of our primary and it appears to * be up. */ - if (nodeIsSlave(myself) && server.masterhost == NULL && myself->slaveof && nodeHasAddr(myself->slaveof)) { - replicationSetMaster(myself->slaveof->ip, getNodeDefaultReplicationPort(myself->slaveof)); + if (nodeIsReplica(myself) && server.primary_host == NULL && myself->replicaof && nodeHasAddr(myself->replicaof)) { + replicationSetPrimary(myself->replicaof->ip, getNodeDefaultReplicationPort(myself->replicaof)); } /* Abort a manual failover if the timeout is reached. */ manualFailoverCheckTimeout(); - if (nodeIsSlave(myself)) { + if (nodeIsReplica(myself)) { clusterHandleManualFailover(); - if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleSlaveFailover(); - /* If there are orphaned slaves, and we are a slave among the masters - * with the max number of non-failing slaves, consider migrating to - * the orphaned masters. 
Note that it does not make sense to try - * a migration if there is no master with at least *two* working - * slaves. */ - if (orphaned_masters && max_slaves >= 2 && this_slaves == max_slaves && server.cluster_allow_replica_migration) - clusterHandleSlaveMigration(max_slaves); + if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleReplicaFailover(); + /* If there are orphaned replicas, and we are a replica among the primaries + * with the max number of non-failing replicas, consider migrating to + * the orphaned primaries. Note that it does not make sense to try + * a migration if there is no primary with at least *two* working + * replicas. */ + if (orphaned_primaries && max_replicas >= 2 && this_replicas == max_replicas && + server.cluster_allow_replica_migration) + clusterHandleReplicaMigration(max_replicas); } if (update_state || server.cluster->state == CLUSTER_FAIL) clusterUpdateState(); @@ -4825,14 +4822,14 @@ void clusterBeforeSleep(void) { if (flags & CLUSTER_TODO_HANDLE_MANUALFAILOVER) { /* Handle manual failover as soon as possible so that won't have a 100ms * as it was handled only in clusterCron */ - if (nodeIsSlave(myself)) { + if (nodeIsReplica(myself)) { clusterHandleManualFailover(); - if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleSlaveFailover(); + if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleReplicaFailover(); } } else if (flags & CLUSTER_TODO_HANDLE_FAILOVER) { /* Handle failover, this is needed when it is likely that there is already - * the quorum from masters in order to react fast. */ - clusterHandleSlaveFailover(); + * the quorum from primaries in order to react fast. */ + clusterHandleReplicaFailover(); } /* Update the cluster state. */ @@ -4878,21 +4875,21 @@ void bitmapClearBit(unsigned char *bitmap, int pos) { bitmap[byte] &= ~(1 << bit); } -/* Return non-zero if there is at least one master with slaves in the cluster. 
+/* Return non-zero if there is at least one primary with replicas in the cluster. * Otherwise zero is returned. Used by clusterNodeSetSlotBit() to set the - * MIGRATE_TO flag the when a master gets the first slot. */ -int clusterMastersHaveSlaves(void) { - dictIterator *di = dictGetSafeIterator(server.cluster->nodes); + * MIGRATE_TO flag the when a primary gets the first slot. */ +int clusterPrimariesHaveReplicas(void) { + dictIterator di; + dictInitIterator(&di, server.cluster->nodes); dictEntry *de; - int slaves = 0; - while ((de = dictNext(di)) != NULL) { + int replicas = 0; + while ((de = dictNext(&di)) != NULL) { clusterNode *node = dictGetVal(de); - if (nodeIsSlave(node)) continue; - slaves += node->numslaves; + if (nodeIsReplica(node)) continue; + replicas += node->num_replicas; } - dictReleaseIterator(di); - return slaves != 0; + return replicas != 0; } /* Set the slot bit and return the old value. */ @@ -4901,20 +4898,20 @@ int clusterNodeSetSlotBit(clusterNode *n, int slot) { if (!old) { bitmapSetBit(n->slots, slot); n->numslots++; - /* When a master gets its first slot, even if it has no slaves, - * it gets flagged with MIGRATE_TO, that is, the master is a valid + /* When a primary gets its first slot, even if it has no replicas, + * it gets flagged with MIGRATE_TO, that is, the primary is a valid * target for replicas migration, if and only if at least one of - * the other masters has slaves right now. + * the other primaries has replicas right now. * - * Normally masters are valid targets of replica migration if: - * 1. The used to have slaves (but no longer have). - * 2. They are slaves failing over a master that used to have slaves. + * Normally primaries are valid targets of replica migration if: + * 1. The used to have replicas (but no longer have). + * 2. They are replicas failing over a primary that used to have replicas. 
* - * However new masters with slots assigned are considered valid - * migration targets if the rest of the cluster is not a slave-less. + * However new primaries with slots assigned are considered valid + * migration targets if the rest of the cluster is not a replica-less. * * See https://github.com/redis/redis/issues/3043 for more info. */ - if (n->numslots == 1 && clusterMastersHaveSlaves()) n->flags |= CLUSTER_NODE_MIGRATE_TO; + if (n->numslots == 1 && clusterPrimariesHaveReplicas()) n->flags |= CLUSTER_NODE_MIGRATE_TO; } return old; } @@ -4954,7 +4951,7 @@ int clusterDelSlot(int slot) { if (!n) return C_ERR; - /* Cleanup the channels in master/replica as part of slot deletion. */ + /* Cleanup the channels in primary/replica as part of slot deletion. */ removeChannelsInSlot(slot); /* Clear the slot bit. */ serverAssert(clusterNodeClearSlotBit(n, slot) == 1); @@ -4979,7 +4976,7 @@ int clusterDelNodeSlots(clusterNode *node) { } /* Clear the migrating / importing state for all the slots. - * This is useful at initialization and when turning a master into slave. */ + * This is useful at initialization and when turning a primary into replica. 
*/ void clusterCloseAllSlots(void) { memset(server.cluster->migrating_slots_to, 0, sizeof(server.cluster->migrating_slots_to)); memset(server.cluster->importing_slots_from, 0, sizeof(server.cluster->importing_slots_from)); @@ -4999,20 +4996,20 @@ void clusterCloseAllSlots(void) { void clusterUpdateState(void) { int j, new_state; - int reachable_masters = 0; + int reachable_primaries = 0; static mstime_t among_minority_time; static mstime_t first_call_time = 0; server.cluster->todo_before_sleep &= ~CLUSTER_TODO_UPDATE_STATE; - /* If this is a master node, wait some time before turning the state + /* If this is a primary node, wait some time before turning the state * into OK, since it is not a good idea to rejoin the cluster as a writable - * master, after a reboot, without giving the cluster a chance to + * primary, after a reboot, without giving the cluster a chance to * reconfigure this node. Note that the delay is calculated starting from * the first call to this function and not since the server start, in order * to not count the DB loading time. */ if (first_call_time == 0) first_call_time = mstime(); - if (clusterNodeIsMaster(myself) && server.cluster->state == CLUSTER_FAIL && + if (clusterNodeIsPrimary(myself) && server.cluster->state == CLUSTER_FAIL && mstime() - first_call_time < CLUSTER_WRITABLE_DELAY) return; @@ -5030,10 +5027,10 @@ void clusterUpdateState(void) { } } - /* Compute the cluster size, that is the number of master nodes + /* Compute the cluster size, that is the number of primary nodes * serving at least a single slot. * - * At the same time count the number of reachable masters having + * At the same time count the number of reachable primaries having * at least one slot. 
*/ { dictIterator *di; @@ -5044,9 +5041,9 @@ void clusterUpdateState(void) { while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); - if (clusterNodeIsMaster(node) && node->numslots) { + if (clusterNodeIsVotingPrimary(node)) { server.cluster->size++; - if ((node->flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) == 0) reachable_masters++; + if ((node->flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) == 0) reachable_primaries++; } } dictReleaseIterator(di); @@ -5057,7 +5054,7 @@ void clusterUpdateState(void) { { int needed_quorum = (server.cluster->size / 2) + 1; - if (reachable_masters < needed_quorum) { + if (reachable_primaries < needed_quorum) { new_state = CLUSTER_FAIL; among_minority_time = mstime(); } @@ -5067,14 +5064,14 @@ void clusterUpdateState(void) { if (new_state != server.cluster->state) { mstime_t rejoin_delay = server.cluster_node_timeout; - /* If the instance is a master and was partitioned away with the + /* If the instance is a primary and was partitioned away with the * minority, don't let it accept queries for some time after the * partition heals, to make sure there is enough time to receive * a configuration update. */ if (rejoin_delay > CLUSTER_MAX_REJOIN_DELAY) rejoin_delay = CLUSTER_MAX_REJOIN_DELAY; if (rejoin_delay < CLUSTER_MIN_REJOIN_DELAY) rejoin_delay = CLUSTER_MIN_REJOIN_DELAY; - if (new_state == CLUSTER_OK && clusterNodeIsMaster(myself) && mstime() - among_minority_time < rejoin_delay) { + if (new_state == CLUSTER_OK && clusterNodeIsPrimary(myself) && mstime() - among_minority_time < rejoin_delay) { return; } @@ -5107,12 +5104,12 @@ int verifyClusterConfigWithData(void) { int update_config = 0; /* Return ASAP if a module disabled cluster redirections. In that case - * every master can store keys about every possible hash slot. */ + * every primary can store keys about every possible hash slot. 
*/ if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return C_OK; - /* If this node is a slave, don't perform the check at all as we + /* If this node is a replica, don't perform the check at all as we * completely depend on the replication stream. */ - if (nodeIsSlave(myself)) return C_OK; + if (nodeIsReplica(myself)) return C_OK; /* Make sure we only have keys in DB0. */ for (j = 1; j < server.dbnum; j++) { @@ -5165,35 +5162,35 @@ int verifyClusterConfigWithData(void) { /* Remove all the shard channel related information not owned by the current shard. */ static inline void removeAllNotOwnedShardChannelSubscriptions(void) { if (!kvstoreSize(server.pubsubshard_channels)) return; - clusterNode *currmaster = clusterNodeIsMaster(myself) ? myself : myself->slaveof; + clusterNode *cur_primary = clusterNodeIsPrimary(myself) ? myself : myself->replicaof; for (int j = 0; j < CLUSTER_SLOTS; j++) { - if (server.cluster->slots[j] != currmaster) { + if (server.cluster->slots[j] != cur_primary) { removeChannelsInSlot(j); } } } /* ----------------------------------------------------------------------------- - * SLAVE nodes handling + * REPLICA nodes handling * -------------------------------------------------------------------------- */ -/* Set the specified node 'n' as master for this node. - * If this node is currently a master, it is turned into a slave. */ -void clusterSetMaster(clusterNode *n, int closeSlots) { +/* Set the specified node 'n' as primary for this node. + * If this node is currently a primary, it is turned into a replica. 
*/ +void clusterSetPrimary(clusterNode *n, int closeSlots) { serverAssert(n != myself); serverAssert(myself->numslots == 0); - if (clusterNodeIsMaster(myself)) { - myself->flags &= ~(CLUSTER_NODE_MASTER | CLUSTER_NODE_MIGRATE_TO); - myself->flags |= CLUSTER_NODE_SLAVE; + if (clusterNodeIsPrimary(myself)) { + myself->flags &= ~(CLUSTER_NODE_PRIMARY | CLUSTER_NODE_MIGRATE_TO); + myself->flags |= CLUSTER_NODE_REPLICA; } else { - if (myself->slaveof) clusterNodeRemoveSlave(myself->slaveof, myself); + if (myself->replicaof) clusterNodeRemoveReplica(myself->replicaof, myself); } if (closeSlots) clusterCloseAllSlots(); - myself->slaveof = n; + myself->replicaof = n; updateShardId(myself, n->shard_id); - clusterNodeAddSlave(n, myself); - replicationSetMaster(n->ip, getNodeDefaultReplicationPort(n)); + clusterNodeAddReplica(n, myself); + replicationSetPrimary(n->ip, getNodeDefaultReplicationPort(n)); removeAllNotOwnedShardChannelSubscriptions(); resetManualFailover(); } @@ -5208,8 +5205,8 @@ struct clusterNodeFlags { }; static struct clusterNodeFlags clusterNodeFlagsTable[] = { - {CLUSTER_NODE_MYSELF, "myself,"}, {CLUSTER_NODE_MASTER, "master,"}, - {CLUSTER_NODE_SLAVE, "slave,"}, {CLUSTER_NODE_PFAIL, "fail?,"}, + {CLUSTER_NODE_MYSELF, "myself,"}, {CLUSTER_NODE_PRIMARY, "master,"}, + {CLUSTER_NODE_REPLICA, "slave,"}, {CLUSTER_NODE_PFAIL, "fail?,"}, {CLUSTER_NODE_FAIL, "fail,"}, {CLUSTER_NODE_HANDSHAKE, "handshake,"}, {CLUSTER_NODE_NOADDR, "noaddr,"}, {CLUSTER_NODE_NOFAILOVER, "nofailover,"}}; @@ -5280,10 +5277,10 @@ sds clusterGenNodeDescription(client *c, clusterNode *node, int tls_primary) { ci = sdscatlen(ci, " ", 1); ci = representClusterNodeFlags(ci, node->flags); - /* Slave of... or just "-" */ + /* Replica of... 
or just "-" */ ci = sdscatlen(ci, " ", 1); - if (node->slaveof) - ci = sdscatlen(ci, node->slaveof->name, CLUSTER_NAMELEN); + if (node->replicaof) + ci = sdscatlen(ci, node->replicaof->name, CLUSTER_NAMELEN); else ci = sdscatlen(ci, "-", 1); @@ -5543,7 +5540,7 @@ void clusterUpdateSlots(client *c, unsigned char *slots, int del) { long long getNodeReplicationOffset(clusterNode *node) { if (node->flags & CLUSTER_NODE_MYSELF) { - return nodeIsSlave(node) ? replicationGetSlaveOffset() : server.master_repl_offset; + return nodeIsReplica(node) ? replicationGetReplicaOffset() : server.primary_repl_offset; } else { return node->repl_offset; } @@ -5586,7 +5583,7 @@ void addNodeDetailsToShardReply(client *c, clusterNode *node) { long long node_offset = getNodeReplicationOffset(node); addReplyBulkCString(c, "role"); - addReplyBulkCString(c, nodeIsSlave(node) ? "replica" : "master"); + addReplyBulkCString(c, nodeIsReplica(node) ? "replica" : "master"); reply_count++; addReplyBulkCString(c, "replication-offset"); @@ -5597,7 +5594,7 @@ void addNodeDetailsToShardReply(client *c, clusterNode *node) { const char *health_msg = NULL; if (nodeFailed(node)) { health_msg = "fail"; - } else if (nodeIsSlave(node) && node_offset == 0) { + } else if (nodeIsReplica(node) && node_offset == 0) { health_msg = "loading"; } else { health_msg = "online"; @@ -5616,7 +5613,7 @@ void addShardReplyForClusterShards(client *c, list *nodes) { addReplyBulkCString(c, "slots"); /* Use slot_info_pairs from the primary only */ - n = clusterNodeGetMaster(n); + n = clusterNodeGetPrimary(n); if (n->slot_info_pairs != NULL) { serverAssert((n->slot_info_pairs_count % 2) == 0); @@ -5769,10 +5766,10 @@ int getClusterSize(void) { } int getMyShardSlotCount(void) { - if (!nodeIsSlave(server.cluster->myself)) { + if (!nodeIsReplica(server.cluster->myself)) { return server.cluster->myself->numslots; - } else if (server.cluster->myself->slaveof) { - return server.cluster->myself->slaveof->numslots; + } else if 
(server.cluster->myself->replicaof) { + return server.cluster->myself->replicaof->numslots; } else { return 0; } @@ -5798,8 +5795,8 @@ char **getClusterNodesList(size_t *numnodes) { return ids; } -int clusterNodeIsMaster(clusterNode *n) { - return n->flags & CLUSTER_NODE_MASTER; +int clusterNodeIsPrimary(clusterNode *n) { + return n->flags & CLUSTER_NODE_PRIMARY; } int handleDebugClusterCommand(client *c) { @@ -5843,13 +5840,19 @@ char *clusterNodeIp(clusterNode *node) { return node->ip; } -int clusterNodeIsSlave(clusterNode *node) { - return node->flags & CLUSTER_NODE_SLAVE; +int clusterNodeIsReplica(clusterNode *node) { + return node->flags & CLUSTER_NODE_REPLICA; } -clusterNode *clusterNodeGetMaster(clusterNode *node) { - while (node->slaveof != NULL) node = node->slaveof; - return node; +clusterNode *clusterNodeGetPrimary(clusterNode *node) { + clusterNode *primary = node; + while (primary->replicaof != NULL) { + primary = primary->replicaof; + if (primary == node) break; + } + /* Assert that a node's replicaof/primary chain does not form a cycle. 
*/ + debugServerAssert(primary->replicaof == NULL); + return primary; } char *clusterNodeGetName(clusterNode *node) { @@ -5894,7 +5897,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, int optarg_pos = 0; /* Allow primaries to replicate "CLUSTER SETSLOT" */ - if (!(c->flags & CLIENT_MASTER) && nodeIsSlave(myself)) { + if (!(c->flags & CLIENT_PRIMARY) && nodeIsReplica(myself)) { addReplyError(c, "Please use SETSLOT only with masters."); return 0; } @@ -5903,7 +5906,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, if (!strcasecmp(c->argv[3]->ptr, "migrating") && c->argc >= 5) { /* CLUSTER SETSLOT MIGRATING */ - if (nodeIsMaster(myself) && server.cluster->slots[slot] != myself) { + if (nodeIsPrimary(myself) && server.cluster->slots[slot] != myself) { addReplyErrorFormat(c, "I'm not the owner of hash slot %u", slot); return 0; } @@ -5912,7 +5915,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "I don't know about node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a master"); return 0; } @@ -5928,7 +5931,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "I don't know about node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a master"); return 0; } @@ -5943,7 +5946,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "Unknown node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a master"); return 0; } @@ -6018,24 +6021,43 @@ void clusterCommandSetSlot(client *c) { * This ensures that all replicas have the latest topology information, enabling * a reliable slot ownership transfer even 
if the primary node went down during * the process. */ - if (nodeIsMaster(myself) && myself->numslaves != 0 && (c->flags & CLIENT_REPLICATION_DONE) == 0) { - forceCommandPropagation(c, PROPAGATE_REPL); - /* We are a primary and this is the first time we see this `SETSLOT` - * command. Force-replicate the command to all of our replicas - * first and only on success will we handle the command. - * Note that - * 1. All replicas are expected to ack the replication within the given timeout - * 2. The repl offset target is set to the master's current repl offset + 1. - * There is no concern of partial replication because replicas always - * ack the repl offset at the command boundary. */ - blockClientForReplicaAck(c, timeout_ms, server.master_repl_offset + 1, myself->numslaves, 0); - /* Mark client as pending command for execution after replication to replicas. */ - c->flags |= CLIENT_PENDING_COMMAND; - replicationRequestAckFromSlaves(); - return; + if (nodeIsPrimary(myself) && myself->num_replicas != 0 && (c->flags & CLIENT_REPLICATION_DONE) == 0) { + /* Iterate through the list of replicas to check if there are any running + * a version older than 8.0.0. Replicas with versions older than 8.0.0 do + * not support the CLUSTER SETSLOT command on replicas. If such a replica + * is found, we should skip the replication and fall back to the old + * non-replicated behavior.*/ + listIter li; + listNode *ln; + int legacy_replica_found = 0; + listRewind(server.replicas, &li); + while ((ln = listNext(&li))) { + client *r = ln->value; + if (r->replica_version < 0x80000 /* 8.0.0 */) { + legacy_replica_found++; + break; + } + } + + if (!legacy_replica_found) { + forceCommandPropagation(c, PROPAGATE_REPL); + /* We are a primary and this is the first time we see this `SETSLOT` + * command. Force-replicate the command to all of our replicas + * first and only on success will we handle the command. + * Note that + * 1. 
All replicas are expected to ack the replication within the given timeout + * 2. The repl offset target is set to the primary's current repl offset + 1. + * There is no concern of partial replication because replicas always + * ack the repl offset at the command boundary. */ + blockClientForReplicaAck(c, timeout_ms, server.primary_repl_offset + 1, myself->num_replicas, 0); + /* Mark client as pending command for execution after replication to replicas. */ + c->flags |= CLIENT_PENDING_COMMAND; + replicationRequestAckFromReplicas(); + return; + } } - /* Slot states have been updated on the replicas (if any). + /* Slot states have been updated on the compatible replicas (if any). * Now exuecte the command on the primary. */ if (!strcasecmp(c->argv[3]->ptr, "migrating")) { serverLog(LL_NOTICE, "Migrating slot %d to node %.40s (%s)", slot, n->name, n->human_nodename); @@ -6064,20 +6086,20 @@ void clusterCommandSetSlot(client *c) { clusterDelSlot(slot); clusterAddSlot(n, slot); - /* If we are a master left without slots, we should turn into a - * replica of the new master. */ + /* If we are a primary left without slots, we should turn into a + * replica of the new primary. */ if (slot_was_mine && n != myself && myself->numslots == 0 && server.cluster_allow_replica_migration) { serverLog(LL_NOTICE, "Lost my last slot during slot migration. Reconfiguring myself " "as a replica of %.40s (%s) in shard %.40s", n->name, n->human_nodename, n->shard_id); - clusterSetMaster(n, 1); + clusterSetPrimary(n, 1); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } /* If this node or this node's primary was importing this slot, * assigning the slot to itself also clears the importing status. 
*/ - if ((n == myself || n == myself->slaveof) && server.cluster->importing_slots_from[slot]) { + if ((n == myself || n == myself->replicaof) && server.cluster->importing_slots_from[slot]) { server.cluster->importing_slots_from[slot] = NULL; /* Only primary broadcasts the updates */ @@ -6238,7 +6260,7 @@ int clusterCommandSpecial(client *c) { } else if (n == myself) { addReplyError(c, "I tried hard but I can't forget myself..."); return 1; - } else if (nodeIsSlave(myself) && myself->slaveof == n) { + } else if (nodeIsReplica(myself) && myself->replicaof == n) { addReplyError(c, "Can't forget my master!"); return 1; } @@ -6261,23 +6283,23 @@ int clusterCommandSpecial(client *c) { return 1; } - /* Can't replicate a slave. */ - if (nodeIsSlave(n)) { + /* Can't replicate a replica. */ + if (nodeIsReplica(n)) { addReplyError(c, "I can only replicate a master, not a replica."); return 1; } - /* If the instance is currently a master, it should have no assigned + /* If the instance is currently a primary, it should have no assigned * slots nor keys to accept to replicate some other node. - * Slaves can switch to another master without issues. */ - if (clusterNodeIsMaster(myself) && (myself->numslots != 0 || kvstoreSize(server.db[0].keys) != 0)) { + * Replicas can switch to another primary without issues. */ + if (clusterNodeIsPrimary(myself) && (myself->numslots != 0 || kvstoreSize(server.db[0].keys) != 0)) { addReplyError(c, "To set a master the node must be empty and " "without assigned slots."); return 1; } - /* Set the master. */ - clusterSetMaster(n, 1); + /* Set the primary. */ + clusterSetPrimary(n, 1); clusterBroadcastPong(CLUSTER_BROADCAST_ALL); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); addReply(c, shared.ok); @@ -6308,13 +6330,13 @@ int clusterCommandSpecial(client *c) { } /* Check preconditions. 
*/ - if (clusterNodeIsMaster(myself)) { + if (clusterNodeIsPrimary(myself)) { addReplyError(c, "You should send CLUSTER FAILOVER to a replica"); return 1; - } else if (myself->slaveof == NULL) { + } else if (myself->replicaof == NULL) { addReplyError(c, "I'm a replica but my master is unknown to me"); return 1; - } else if (!force && (nodeFailed(myself->slaveof) || myself->slaveof->link == NULL)) { + } else if (!force && (nodeFailed(myself->replicaof) || myself->replicaof->link == NULL)) { addReplyError(c, "Master is down or failed, " "please use CLUSTER FAILOVER FORCE"); return 1; @@ -6325,20 +6347,20 @@ int clusterCommandSpecial(client *c) { if (takeover) { /* A takeover does not perform any initial check. It just * generates a new configuration epoch for this node without - * consensus, claims the master's slots, and broadcast the new + * consensus, claims the primary's slots, and broadcast the new * configuration. */ - serverLog(LL_NOTICE, "Taking over the master (user request)."); + serverLog(LL_NOTICE, "Taking over the primary (user request)."); clusterBumpConfigEpochWithoutConsensus(); - clusterFailoverReplaceYourMaster(); + clusterFailoverReplaceYourPrimary(); } else if (force) { /* If this is a forced failover, we don't need to talk with our - * master to agree about the offset. We just failover taking over + * primary to agree about the offset. We just failover taking over * it without coordination. 
*/ serverLog(LL_NOTICE, "Forced failover user request accepted."); server.cluster->mf_can_start = 1; } else { serverLog(LL_NOTICE, "Manual failover user request accepted."); - clusterSendMFStart(myself->slaveof); + clusterSendMFStart(myself->replicaof); } addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "set-config-epoch") && c->argc == 3) { @@ -6388,9 +6410,9 @@ int clusterCommandSpecial(client *c) { } } - /* Slaves can be reset while containing data, but not master nodes + /* Replicas can be reset while containing data, but not primary nodes * that must be empty. */ - if (clusterNodeIsMaster(myself) && kvstoreSize(c->db->keys) != 0) { + if (clusterNodeIsPrimary(myself) && kvstoreSize(c->db->keys) != 0) { addReplyError(c, "CLUSTER RESET can't be called with " "master nodes containing keys"); return 1; @@ -6447,12 +6469,12 @@ const char **clusterCommandExtendedHelp(void) { return help; } -int clusterNodeNumSlaves(clusterNode *node) { - return node->numslaves; +int clusterNodeNumReplicas(clusterNode *node) { + return node->num_replicas; } -clusterNode *clusterNodeGetSlave(clusterNode *node, int slave_idx) { - return node->slaves[slave_idx]; +clusterNode *clusterNodeGetReplica(clusterNode *node, int replica_idx) { + return node->replicas[replica_idx]; } clusterNode *getMigratingSlotDest(int slot) { @@ -6498,13 +6520,13 @@ int clusterAllowFailoverCmd(client *c) { return 0; } -void clusterPromoteSelfToMaster(void) { - replicationUnsetMaster(); +void clusterPromoteSelfToPrimary(void) { + replicationUnsetPrimary(); } int detectAndUpdateCachedNodeHealth(void) { dictIterator di; - dictInitSafeIterator(&di, server.cluster->nodes); + dictInitIterator(&di, server.cluster->nodes); dictEntry *de; clusterNode *node; int overall_health_changed = 0; @@ -6520,39 +6542,82 @@ int detectAndUpdateCachedNodeHealth(void) { return overall_health_changed; } -/* Replicate migrating and importing slot states to all replicas */ -void clusterReplicateOpenSlots(void) { - if 
(!server.cluster_enabled) return; +/* Encode open slot states into an sds string to be persisted as an aux field in RDB. */ +sds clusterEncodeOpenSlotsAuxField(int rdbflags) { + if (!server.cluster_enabled) return NULL; - int argc = 5; - robj **argv = zmalloc(sizeof(robj *) * argc); + /* Open slots should not be persisted to an RDB file. This data is intended only for full sync. */ + if ((rdbflags & RDBFLAGS_REPLICATION) == 0) return NULL; - argv[0] = shared.cluster; - argv[1] = shared.setslot; + sds s = NULL; for (int i = 0; i < 2; i++) { - clusterNode **nodes_ptr = NULL; + clusterNode **nodes_ptr; if (i == 0) { nodes_ptr = server.cluster->importing_slots_from; - argv[3] = shared.importing; } else { nodes_ptr = server.cluster->migrating_slots_to; - argv[3] = shared.migrating; } for (int j = 0; j < CLUSTER_SLOTS; j++) { if (nodes_ptr[j] == NULL) continue; + if (s == NULL) s = sdsempty(); + s = sdscatfmt(s, "%i%s", j, (i == 0) ? "<" : ">"); + s = sdscatlen(s, nodes_ptr[j]->name, CLUSTER_NAMELEN); + s = sdscatlen(s, ",", 1); + } + } + + return s; +} + +/* Decode the open slot aux field and restore the in-memory slot states. */ +int clusterDecodeOpenSlotsAuxField(int rdbflags, sds s) { + if (!server.cluster_enabled || s == NULL) return C_OK; - argv[2] = createStringObjectFromLongLongForValue(j); - sds name = sdsnewlen(nodes_ptr[j]->name, sizeof(nodes_ptr[j]->name)); - argv[4] = createObject(OBJ_STRING, name); + /* Open slots should not be loaded from a persisted RDB file, but only from a full sync. 
*/ + if ((rdbflags & RDBFLAGS_REPLICATION) == 0) return C_OK; - replicationFeedSlaves(0, argv, argc); + while (*s) { + /* Extract slot number */ + int slot = atoi(s); + if (slot < 0 || slot >= CLUSTER_SLOTS) return C_ERR; - decrRefCount(argv[2]); - decrRefCount(argv[4]); + while (*s && *s != '<' && *s != '>') s++; + if (*s != '<' && *s != '>') return C_ERR; + + /* Determine if it's an importing or migrating slot */ + int is_importing = (*s == '<'); + s++; + + /* Extract the node name */ + char node_name[CLUSTER_NAMELEN]; + int k = 0; + while (*s && *s != ',' && k < CLUSTER_NAMELEN) { + node_name[k++] = *s++; + } + + /* Ensure the node name is of the correct length */ + if (k != CLUSTER_NAMELEN || *s != ',') return C_ERR; + + /* Move to the next slot */ + s++; + + /* Find the corresponding node */ + clusterNode *node = clusterLookupNode(node_name, CLUSTER_NAMELEN); + if (!node) { + /* Create a new node if not found */ + node = createClusterNode(node_name, 0); + clusterAddNode(node); + } + + /* Set the slot state */ + if (is_importing) { + server.cluster->importing_slots_from[slot] = node; + } else { + server.cluster->migrating_slots_to[slot] = node; } } - zfree(argv); + return C_OK; } diff --git a/src/cluster_legacy.h b/src/cluster_legacy.h index cc02f30a8b..fb80f45eec 100644 --- a/src/cluster_legacy.h +++ b/src/cluster_legacy.h @@ -5,13 +5,13 @@ /* The following defines are amount of time, sometimes expressed as * multiplicators of the node timeout value (when ending with MULT). */ -#define CLUSTER_FAIL_REPORT_VALIDITY_MULT 2 /* Fail report validity. */ -#define CLUSTER_FAIL_UNDO_TIME_MULT 2 /* Undo fail if master is back. */ -#define CLUSTER_MF_TIMEOUT 5000 /* Milliseconds to do a manual failover. */ -#define CLUSTER_MF_PAUSE_MULT 2 /* Master pause manual failover mult. */ -#define CLUSTER_SLAVE_MIGRATION_DELAY 5000 /* Delay for slave migration. */ +#define CLUSTER_FAIL_REPORT_VALIDITY_MULT 2 /* Fail report validity. 
*/ +#define CLUSTER_FAIL_UNDO_TIME_MULT 2 /* Undo fail if primary is back. */ +#define CLUSTER_MF_TIMEOUT 5000 /* Milliseconds to do a manual failover. */ +#define CLUSTER_MF_PAUSE_MULT 2 /* Primary pause manual failover mult. */ +#define CLUSTER_REPLICA_MIGRATION_DELAY 5000 /* Delay for replica migration. */ -/* Reasons why a slave is not able to failover. */ +/* Reasons why a replica is not able to failover. */ #define CLUSTER_CANT_FAILOVER_NONE 0 #define CLUSTER_CANT_FAILOVER_DATA_AGE 1 #define CLUSTER_CANT_FAILOVER_WAITING_DELAY 2 @@ -41,23 +41,23 @@ typedef struct clusterLink { } clusterLink; /* Cluster node flags and macros. */ -#define CLUSTER_NODE_MASTER 1 /* The node is a master */ -#define CLUSTER_NODE_SLAVE 2 /* The node is a slave */ -#define CLUSTER_NODE_PFAIL 4 /* Failure? Need acknowledge */ -#define CLUSTER_NODE_FAIL 8 /* The node is believed to be malfunctioning */ -#define CLUSTER_NODE_MYSELF 16 /* This node is myself */ -#define CLUSTER_NODE_HANDSHAKE 32 /* We have still to exchange the first ping */ -#define CLUSTER_NODE_NOADDR 64 /* We don't know the address of this node */ -#define CLUSTER_NODE_MEET 128 /* Send a MEET message to this node */ -#define CLUSTER_NODE_MIGRATE_TO 256 /* Master eligible for replica migration. */ -#define CLUSTER_NODE_NOFAILOVER 512 /* Slave will not try to failover. */ -#define CLUSTER_NODE_EXTENSIONS_SUPPORTED 1024 /* This node supports extensions. */ +#define CLUSTER_NODE_PRIMARY (1 << 0) /* The node is a primary */ +#define CLUSTER_NODE_REPLICA (1 << 1) /* The node is a replica */ +#define CLUSTER_NODE_PFAIL (1 << 2) /* Failure? 
Need acknowledge */ +#define CLUSTER_NODE_FAIL (1 << 3) /* The node is believed to be malfunctioning */ +#define CLUSTER_NODE_MYSELF (1 << 4) /* This node is myself */ +#define CLUSTER_NODE_HANDSHAKE (1 << 5) /* We have still to exchange the first ping */ +#define CLUSTER_NODE_NOADDR (1 << 6) /* We don't know the address of this node */ +#define CLUSTER_NODE_MEET (1 << 7) /* Send a MEET message to this node */ +#define CLUSTER_NODE_MIGRATE_TO (1 << 8) /* Primary eligible for replica migration. */ +#define CLUSTER_NODE_NOFAILOVER (1 << 9) /* replica will not try to failover. */ +#define CLUSTER_NODE_EXTENSIONS_SUPPORTED (1 << 10) /* This node supports extensions. */ #define CLUSTER_NODE_NULL_NAME \ "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" \ "\000\000\000\000\000\000\000\000\000\000\000\000" -#define nodeIsMaster(n) ((n)->flags & CLUSTER_NODE_MASTER) -#define nodeIsSlave(n) ((n)->flags & CLUSTER_NODE_SLAVE) +#define nodeIsPrimary(n) ((n)->flags & CLUSTER_NODE_PRIMARY) +#define nodeIsReplica(n) ((n)->flags & CLUSTER_NODE_REPLICA) #define nodeInHandshake(n) ((n)->flags & CLUSTER_NODE_HANDSHAKE) #define nodeHasAddr(n) (!((n)->flags & CLUSTER_NODE_NOADDR)) #define nodeTimedOut(n) ((n)->flags & CLUSTER_NODE_PFAIL) @@ -216,14 +216,14 @@ typedef struct { uint16_t type; /* Message type */ uint16_t count; /* Number of gossip sections. */ uint64_t currentEpoch; /* The epoch accordingly to the sending node. */ - uint64_t configEpoch; /* The config epoch if it's a master, or the last - epoch advertised by its master if it is a - slave. */ - uint64_t offset; /* Master replication offset if node is a master or - processed replication offset if node is a slave. */ + uint64_t configEpoch; /* The config epoch if it's a primary, or the last + epoch advertised by its primary if it is a + replica. 
*/ + uint64_t offset; /* Primary replication offset if node is a primary or + processed replication offset if node is a replica. */ char sender[CLUSTER_NAMELEN]; /* Name of the sender node */ unsigned char myslots[CLUSTER_SLOTS / 8]; - char slaveof[CLUSTER_NAMELEN]; + char replicaof[CLUSTER_NAMELEN]; char myip[NET_IP_STR_LEN]; /* Sender IP, if not all zeroed. */ uint16_t extensions; /* Number of extensions sent along with this packet. */ char notused1[30]; /* 30 bytes reserved for future usage. */ @@ -256,7 +256,7 @@ static_assert(offsetof(clusterMsg, configEpoch) == 24, "unexpected field offset" static_assert(offsetof(clusterMsg, offset) == 32, "unexpected field offset"); static_assert(offsetof(clusterMsg, sender) == 40, "unexpected field offset"); static_assert(offsetof(clusterMsg, myslots) == 80, "unexpected field offset"); -static_assert(offsetof(clusterMsg, slaveof) == 2128, "unexpected field offset"); +static_assert(offsetof(clusterMsg, replicaof) == 2128, "unexpected field offset"); static_assert(offsetof(clusterMsg, myip) == 2168, "unexpected field offset"); static_assert(offsetof(clusterMsg, extensions) == 2214, "unexpected field offset"); static_assert(offsetof(clusterMsg, notused1) == 2216, "unexpected field offset"); @@ -271,10 +271,10 @@ static_assert(offsetof(clusterMsg, data) == 2256, "unexpected field offset"); /* Message flags better specify the packet content or are used to * provide some information about the node state. */ -#define CLUSTERMSG_FLAG0_PAUSED (1 << 0) /* Master paused for manual failover. */ +#define CLUSTERMSG_FLAG0_PAUSED (1 << 0) /* Primary paused for manual failover. */ #define CLUSTERMSG_FLAG0_FORCEACK \ (1 << 1) /* Give ACK to AUTH_REQUEST even if \ - master is up. */ + primary is up. */ #define CLUSTERMSG_FLAG0_EXT_DATA (1 << 2) /* Message contains extension data */ struct _clusterNode { @@ -287,20 +287,20 @@ struct _clusterNode { uint16_t *slot_info_pairs; /* Slots info represented as (start/end) pair (consecutive index). 
*/ int slot_info_pairs_count; /* Used number of slots in slot_info_pairs */ int numslots; /* Number of slots handled by this node */ - int numslaves; /* Number of slave nodes, if this is a master */ - clusterNode **slaves; /* pointers to slave nodes */ - clusterNode *slaveof; /* pointer to the master node. Note that it - may be NULL even if the node is a slave - if we don't have the master node in our - tables. */ + int num_replicas; /* Number of replica nodes, if this is a primary */ + clusterNode **replicas; /* pointers to replica nodes */ + clusterNode *replicaof; /* pointer to the primary node. Note that it + may be NULL even if the node is a replica + if we don't have the primary node in our + tables. */ unsigned long long last_in_ping_gossip; /* The number of the last carried in the ping gossip section */ mstime_t ping_sent; /* Unix time we sent latest ping */ mstime_t pong_received; /* Unix time we received the pong */ mstime_t data_received; /* Unix time we received any data */ mstime_t fail_time; /* Unix time when FAIL flag was set */ - mstime_t voted_time; /* Last time we voted for a slave of this master */ + mstime_t voted_time; /* Last time we voted for a replica of this primary */ mstime_t repl_offset_time; /* Unix time we received offset for this node */ - mstime_t orphaned_time; /* Starting time of orphaned master condition */ + mstime_t orphaned_time; /* Starting time of orphaned primary condition */ long long repl_offset; /* Last known repl offset for this node. */ char ip[NET_IP_STR_LEN]; /* Latest known IP address of this node */ sds hostname; /* The known hostname for this node */ @@ -319,32 +319,32 @@ struct clusterState { clusterNode *myself; /* This node */ uint64_t currentEpoch; int state; /* CLUSTER_OK, CLUSTER_FAIL, ... 
*/ - int size; /* Num of master nodes with at least one slot */ + int size; /* Num of primary nodes with at least one slot */ dict *nodes; /* Hash table of name -> clusterNode structures */ dict *shards; /* Hash table of shard_id -> list (of nodes) structures */ dict *nodes_black_list; /* Nodes we don't re-add for a few seconds. */ clusterNode *migrating_slots_to[CLUSTER_SLOTS]; clusterNode *importing_slots_from[CLUSTER_SLOTS]; clusterNode *slots[CLUSTER_SLOTS]; - /* The following fields are used to take the slave state on elections. */ + /* The following fields are used to take the replica state on elections. */ mstime_t failover_auth_time; /* Time of previous or next election. */ int failover_auth_count; /* Number of votes received so far. */ int failover_auth_sent; /* True if we already asked for votes. */ - int failover_auth_rank; /* This slave rank for current auth request. */ + int failover_auth_rank; /* This replica rank for current auth request. */ uint64_t failover_auth_epoch; /* Epoch of the current election. */ - int cant_failover_reason; /* Why a slave is currently not able to + int cant_failover_reason; /* Why a replica is currently not able to failover. See the CANT_FAILOVER_* macros. */ /* Manual failover state in common. */ mstime_t mf_end; /* Manual failover time limit (ms unixtime). It is zero if there is no MF in progress. */ - /* Manual failover state of master. */ - clusterNode *mf_slave; /* Slave performing the manual failover. */ - /* Manual failover state of slave. */ - long long mf_master_offset; /* Master offset the slave needs to start MF + /* Manual failover state of primary. */ + clusterNode *mf_replica; /* replica performing the manual failover. */ + /* Manual failover state of replica. */ + long long mf_primary_offset; /* Primary offset the replica needs to start MF or -1 if still not received. */ - int mf_can_start; /* If non-zero signal that the manual failover - can start requesting masters vote. 
*/ - /* The following fields are used by masters to take state on elections. */ + int mf_can_start; /* If non-zero signal that the manual failover + can start requesting primary vote. */ + /* The following fields are used by primaries to take state on elections. */ uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */ int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */ /* Stats */ diff --git a/src/commands.def b/src/commands.def index c59cb01dc1..e4484529a2 100644 --- a/src/commands.def +++ b/src/commands.def @@ -961,26 +961,26 @@ struct COMMAND_STRUCT CLUSTER_Subcommands[] = { {MAKE_CMD("countkeysinslot","Returns the number of keys in a hash slot.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_COUNTKEYSINSLOT_History,0,CLUSTER_COUNTKEYSINSLOT_Tips,0,clusterCommand,3,CMD_STALE,0,CLUSTER_COUNTKEYSINSLOT_Keyspecs,0,NULL,1),.args=CLUSTER_COUNTKEYSINSLOT_Args}, {MAKE_CMD("delslots","Sets hash slots as unbound for a node.","O(N) where N is the total number of hash slot arguments","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_DELSLOTS_History,0,CLUSTER_DELSLOTS_Tips,0,clusterCommand,-3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_DELSLOTS_Keyspecs,0,NULL,1),.args=CLUSTER_DELSLOTS_Args}, {MAKE_CMD("delslotsrange","Sets hash slot ranges as unbound for a node.","O(N) where N is the total number of the slots between the start slot and end slot arguments.","7.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_DELSLOTSRANGE_History,0,CLUSTER_DELSLOTSRANGE_Tips,0,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_DELSLOTSRANGE_Keyspecs,0,NULL,1),.args=CLUSTER_DELSLOTSRANGE_Args}, -{MAKE_CMD("failover","Forces a replica to perform a manual failover of its 
master.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FAILOVER_History,0,CLUSTER_FAILOVER_Tips,0,clusterCommand,-2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FAILOVER_Keyspecs,0,NULL,1),.args=CLUSTER_FAILOVER_Args}, +{MAKE_CMD("failover","Forces a replica to perform a manual failover of its primary.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FAILOVER_History,0,CLUSTER_FAILOVER_Tips,0,clusterCommand,-2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FAILOVER_Keyspecs,0,NULL,1),.args=CLUSTER_FAILOVER_Args}, {MAKE_CMD("flushslots","Deletes all slots information from a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FLUSHSLOTS_History,0,CLUSTER_FLUSHSLOTS_Tips,0,clusterCommand,2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FLUSHSLOTS_Keyspecs,0,NULL,0)}, {MAKE_CMD("forget","Removes a node from the nodes table.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FORGET_History,0,CLUSTER_FORGET_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FORGET_Keyspecs,0,NULL,1),.args=CLUSTER_FORGET_Args}, {MAKE_CMD("getkeysinslot","Returns the key names in a hash slot.","O(N) where N is the number of requested keys","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_GETKEYSINSLOT_History,0,CLUSTER_GETKEYSINSLOT_Tips,1,clusterCommand,4,CMD_STALE,0,CLUSTER_GETKEYSINSLOT_Keyspecs,0,NULL,2),.args=CLUSTER_GETKEYSINSLOT_Args}, {MAKE_CMD("help","Returns helpful text about the different subcommands.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_HELP_History,0,CLUSTER_HELP_Tips,0,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_HELP_Keyspecs,0,NULL,0)}, -{MAKE_CMD("info","Returns information about the state of a 
node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_INFO_History,0,CLUSTER_INFO_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_INFO_Keyspecs,0,NULL,0)}, +{MAKE_CMD("info","Returns information about the state of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_INFO_History,0,CLUSTER_INFO_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_INFO_Keyspecs,0,NULL,0)}, {MAKE_CMD("keyslot","Returns the hash slot for a key.","O(N) where N is the number of bytes in the key","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_KEYSLOT_History,0,CLUSTER_KEYSLOT_Tips,0,clusterCommand,3,CMD_STALE,0,CLUSTER_KEYSLOT_Keyspecs,0,NULL,1),.args=CLUSTER_KEYSLOT_Args}, {MAKE_CMD("links","Returns a list of all TCP links to and from peer nodes.","O(N) where N is the total number of Cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_LINKS_History,0,CLUSTER_LINKS_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_LINKS_Keyspecs,0,NULL,0)}, {MAKE_CMD("meet","Forces a node to handshake with another node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MEET_History,1,CLUSTER_MEET_Tips,0,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_MEET_Keyspecs,0,NULL,3),.args=CLUSTER_MEET_Args}, -{MAKE_CMD("myid","Returns the ID of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYID_History,0,CLUSTER_MYID_Tips,0,clusterCommand,2,CMD_STALE,0,CLUSTER_MYID_Keyspecs,0,NULL,0)}, -{MAKE_CMD("myshardid","Returns the shard ID of a node.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYSHARDID_History,0,CLUSTER_MYSHARDID_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_MYSHARDID_Keyspecs,0,NULL,0)}, -{MAKE_CMD("nodes","Returns the cluster configuration for a node.","O(N) where N is the total number of Cluster 
nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_NODES_History,0,CLUSTER_NODES_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_NODES_Keyspecs,0,NULL,0)}, -{MAKE_CMD("replicas","Lists the replica nodes of a master node.","O(N) where N is the number of replicas.","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICAS_History,0,CLUSTER_REPLICAS_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICAS_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICAS_Args}, -{MAKE_CMD("replicate","Configure a node as replica of a master node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICATE_History,0,CLUSTER_REPLICATE_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICATE_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICATE_Args}, +{MAKE_CMD("myid","Returns the ID of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYID_History,0,CLUSTER_MYID_Tips,0,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_MYID_Keyspecs,0,NULL,0)}, +{MAKE_CMD("myshardid","Returns the shard ID of a node.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYSHARDID_History,0,CLUSTER_MYSHARDID_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_MYSHARDID_Keyspecs,0,NULL,0)}, +{MAKE_CMD("nodes","Returns the cluster configuration for a node.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_NODES_History,0,CLUSTER_NODES_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_NODES_Keyspecs,0,NULL,0)}, +{MAKE_CMD("replicas","Lists the replica nodes of a primary node.","O(N) where N is the number of replicas.","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICAS_History,0,CLUSTER_REPLICAS_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICAS_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICAS_Args}, +{MAKE_CMD("replicate","Configure a node as 
replica of a primary node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICATE_History,0,CLUSTER_REPLICATE_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICATE_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICATE_Args}, {MAKE_CMD("reset","Resets a node.","O(N) where N is the number of known nodes. The command may execute a FLUSHALL as a side effect.","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_RESET_History,0,CLUSTER_RESET_Tips,0,clusterCommand,-2,CMD_ADMIN|CMD_STALE|CMD_NOSCRIPT,0,CLUSTER_RESET_Keyspecs,0,NULL,1),.args=CLUSTER_RESET_Args}, {MAKE_CMD("saveconfig","Forces a node to save the cluster configuration to disk.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SAVECONFIG_History,0,CLUSTER_SAVECONFIG_Tips,0,clusterCommand,2,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_SAVECONFIG_Keyspecs,0,NULL,0)}, {MAKE_CMD("set-config-epoch","Sets the configuration epoch for a new node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SET_CONFIG_EPOCH_History,0,CLUSTER_SET_CONFIG_EPOCH_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_SET_CONFIG_EPOCH_Keyspecs,0,NULL,1),.args=CLUSTER_SET_CONFIG_EPOCH_Args}, {MAKE_CMD("setslot","Binds a hash slot to a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SETSLOT_History,1,CLUSTER_SETSLOT_Tips,0,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE|CMD_MAY_REPLICATE,0,CLUSTER_SETSLOT_Keyspecs,0,NULL,3),.args=CLUSTER_SETSLOT_Args}, {MAKE_CMD("shards","Returns the mapping of cluster slots to shards.","O(N) where N is the total number of cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SHARDS_History,0,CLUSTER_SHARDS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SHARDS_Keyspecs,0,NULL,0)}, -{MAKE_CMD("slaves","Lists the replica nodes of a master node.","O(N) where N is 
the number of replicas.","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,0,CLUSTER_SLAVES_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_SLAVES_Keyspecs,0,NULL,1),.args=CLUSTER_SLAVES_Args}, +{MAKE_CMD("slaves","Lists the replica nodes of a primary node.","O(N) where N is the number of replicas.","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,0,CLUSTER_SLAVES_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_SLAVES_Keyspecs,0,NULL,1),.args=CLUSTER_SLAVES_Args}, {MAKE_CMD("slots","Returns the mapping of cluster slots to nodes.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,2,CLUSTER_SLOTS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SLOTS_Keyspecs,0,NULL,0)}, {0} }; @@ -1187,6 +1187,7 @@ commandHistory CLIENT_KILL_History[] = { {"5.0.0","Replaced `slave` `TYPE` with `replica`. `slave` still supported for backward compatibility."}, {"6.2.0","`LADDR` option."}, {"8.0.0","`MAXAGE` option."}, +{"8.0.0","Replaced `master` `TYPE` with `primary`. 
`master` still supported for backward compatibility."}, }; #endif @@ -1204,6 +1205,7 @@ commandHistory CLIENT_KILL_History[] = { struct COMMAND_ARG CLIENT_KILL_filter_new_format_client_type_Subargs[] = { {MAKE_ARG("normal",ARG_TYPE_PURE_TOKEN,-1,"NORMAL",NULL,NULL,CMD_ARG_NONE,0,NULL)}, {MAKE_ARG("master",ARG_TYPE_PURE_TOKEN,-1,"MASTER",NULL,"3.2.0",CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary",ARG_TYPE_PURE_TOKEN,-1,"PRIMARY",NULL,"8.0.0",CMD_ARG_NONE,0,NULL)}, {MAKE_ARG("slave",ARG_TYPE_PURE_TOKEN,-1,"SLAVE",NULL,NULL,CMD_ARG_NONE,0,NULL)}, {MAKE_ARG("replica",ARG_TYPE_PURE_TOKEN,-1,"REPLICA",NULL,"5.0.0",CMD_ARG_NONE,0,NULL)}, {MAKE_ARG("pubsub",ARG_TYPE_PURE_TOKEN,-1,"PUBSUB",NULL,NULL,CMD_ARG_NONE,0,NULL)}, @@ -1218,7 +1220,7 @@ struct COMMAND_ARG CLIENT_KILL_filter_new_format_skipme_Subargs[] = { /* CLIENT KILL filter new_format argument table */ struct COMMAND_ARG CLIENT_KILL_filter_new_format_Subargs[] = { {MAKE_ARG("client-id",ARG_TYPE_INTEGER,-1,"ID",NULL,"2.8.12",CMD_ARG_OPTIONAL,0,NULL)}, -{MAKE_ARG("client-type",ARG_TYPE_ONEOF,-1,"TYPE",NULL,"2.8.12",CMD_ARG_OPTIONAL,5,NULL),.subargs=CLIENT_KILL_filter_new_format_client_type_Subargs}, +{MAKE_ARG("client-type",ARG_TYPE_ONEOF,-1,"TYPE",NULL,"2.8.12",CMD_ARG_OPTIONAL,6,NULL),.subargs=CLIENT_KILL_filter_new_format_client_type_Subargs}, {MAKE_ARG("username",ARG_TYPE_STRING,-1,"USER",NULL,NULL,CMD_ARG_OPTIONAL,0,NULL)}, {MAKE_ARG("addr",ARG_TYPE_STRING,-1,"ADDR",NULL,NULL,CMD_ARG_OPTIONAL,0,NULL),.display_text="ip:port"}, {MAKE_ARG("laddr",ARG_TYPE_STRING,-1,"LADDR",NULL,"6.2.0",CMD_ARG_OPTIONAL,0,NULL),.display_text="ip:port"}, @@ -1248,6 +1250,7 @@ commandHistory CLIENT_LIST_History[] = { {"6.2.0","Added `argv-mem`, `tot-mem`, `laddr` and `redir` fields and the optional `ID` filter."}, {"7.0.0","Added `resp`, `multi-mem`, `rbs` and `rbp` fields."}, {"7.0.3","Added `ssub` field."}, +{"8.0.0","Replaced `master` `TYPE` with `primary`. 
`master` still supported for backward compatibility."}, }; #endif @@ -1554,8 +1557,8 @@ struct COMMAND_STRUCT CLIENT_Subcommands[] = { {MAKE_CMD("help","Returns helpful text about the different subcommands.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_HELP_History,0,CLIENT_HELP_Tips,0,clientCommand,2,CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_HELP_Keyspecs,0,NULL,0)}, {MAKE_CMD("id","Returns the unique client ID of the connection.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_ID_History,0,CLIENT_ID_Tips,0,clientCommand,2,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_ID_Keyspecs,0,NULL,0)}, {MAKE_CMD("info","Returns information about the connection.","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_INFO_History,0,CLIENT_INFO_Tips,1,clientCommand,2,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_INFO_Keyspecs,0,NULL,0)}, -{MAKE_CMD("kill","Terminates open connections.","O(N) where N is the number of client connections","2.4.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_KILL_History,6,CLIENT_KILL_Tips,0,clientCommand,-3,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_KILL_Keyspecs,0,NULL,1),.args=CLIENT_KILL_Args}, -{MAKE_CMD("list","Lists open connections.","O(N) where N is the number of client connections","2.4.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_LIST_History,6,CLIENT_LIST_Tips,1,clientCommand,-2,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_LIST_Keyspecs,0,NULL,2),.args=CLIENT_LIST_Args}, +{MAKE_CMD("kill","Terminates open connections.","O(N) where N is the number of client 
connections","2.4.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_KILL_History,7,CLIENT_KILL_Tips,0,clientCommand,-3,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_KILL_Keyspecs,0,NULL,1),.args=CLIENT_KILL_Args}, +{MAKE_CMD("list","Lists open connections.","O(N) where N is the number of client connections","2.4.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_LIST_History,7,CLIENT_LIST_Tips,1,clientCommand,-2,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_LIST_Keyspecs,0,NULL,2),.args=CLIENT_LIST_Args}, {MAKE_CMD("no-evict","Sets the client eviction mode of the connection.","O(1)","7.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_NO_EVICT_History,0,CLIENT_NO_EVICT_Tips,0,clientCommand,3,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_NO_EVICT_Keyspecs,0,NULL,1),.args=CLIENT_NO_EVICT_Args}, {MAKE_CMD("no-touch","Controls whether commands sent by the client affect the LRU/LFU of accessed keys.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_NO_TOUCH_History,0,CLIENT_NO_TOUCH_Tips,0,clientCommand,3,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,CLIENT_NO_TOUCH_Keyspecs,0,NULL,1),.args=CLIENT_NO_TOUCH_Args}, {MAKE_CMD("pause","Suspends commands processing.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_PAUSE_History,1,CLIENT_PAUSE_Tips,0,clientCommand,-3,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_PAUSE_Keyspecs,0,NULL,2),.args=CLIENT_PAUSE_Args}, @@ -5330,6 +5333,28 @@ struct COMMAND_ARG SCRIPT_LOAD_Args[] = { {MAKE_ARG("script",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; +/********** SCRIPT SHOW ********************/ + +#ifndef SKIP_CMD_HISTORY_TABLE +/* SCRIPT SHOW history */ +#define SCRIPT_SHOW_History NULL +#endif + +#ifndef 
SKIP_CMD_TIPS_TABLE +/* SCRIPT SHOW tips */ +#define SCRIPT_SHOW_Tips NULL +#endif + +#ifndef SKIP_CMD_KEY_SPECS_TABLE +/* SCRIPT SHOW key specs */ +#define SCRIPT_SHOW_Keyspecs NULL +#endif + +/* SCRIPT SHOW argument table */ +struct COMMAND_ARG SCRIPT_SHOW_Args[] = { +{MAKE_ARG("sha1",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +}; + /* SCRIPT command table */ struct COMMAND_STRUCT SCRIPT_Subcommands[] = { {MAKE_CMD("debug","Sets the debug mode of server-side Lua scripts.","O(1)","3.2.0",CMD_DOC_NONE,NULL,NULL,"scripting",COMMAND_GROUP_SCRIPTING,SCRIPT_DEBUG_History,0,SCRIPT_DEBUG_Tips,0,scriptCommand,3,CMD_NOSCRIPT,ACL_CATEGORY_SCRIPTING,SCRIPT_DEBUG_Keyspecs,0,NULL,1),.args=SCRIPT_DEBUG_Args}, @@ -5338,6 +5363,7 @@ struct COMMAND_STRUCT SCRIPT_Subcommands[] = { {MAKE_CMD("help","Returns helpful text about the different subcommands.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"scripting",COMMAND_GROUP_SCRIPTING,SCRIPT_HELP_History,0,SCRIPT_HELP_Tips,0,scriptCommand,2,CMD_LOADING|CMD_STALE,ACL_CATEGORY_SCRIPTING,SCRIPT_HELP_Keyspecs,0,NULL,0)}, {MAKE_CMD("kill","Terminates a server-side Lua script during execution.","O(1)","2.6.0",CMD_DOC_NONE,NULL,NULL,"scripting",COMMAND_GROUP_SCRIPTING,SCRIPT_KILL_History,0,SCRIPT_KILL_Tips,2,scriptCommand,2,CMD_NOSCRIPT|CMD_ALLOW_BUSY,ACL_CATEGORY_SCRIPTING,SCRIPT_KILL_Keyspecs,0,NULL,0)}, {MAKE_CMD("load","Loads a server-side Lua script to the script cache.","O(N) with N being the length in bytes of the script body.","2.6.0",CMD_DOC_NONE,NULL,NULL,"scripting",COMMAND_GROUP_SCRIPTING,SCRIPT_LOAD_History,0,SCRIPT_LOAD_Tips,2,scriptCommand,3,CMD_NOSCRIPT|CMD_STALE,ACL_CATEGORY_SCRIPTING,SCRIPT_LOAD_Keyspecs,0,NULL,1),.args=SCRIPT_LOAD_Args}, +{MAKE_CMD("show","Show server-side Lua script in the script 
cache.","O(1).","8.0.0",CMD_DOC_NONE,NULL,NULL,"scripting",COMMAND_GROUP_SCRIPTING,SCRIPT_SHOW_History,0,SCRIPT_SHOW_Tips,0,scriptCommand,3,CMD_NOSCRIPT,ACL_CATEGORY_SCRIPTING,SCRIPT_SHOW_Keyspecs,0,NULL,1),.args=SCRIPT_SHOW_Args}, {0} }; @@ -5377,7 +5403,7 @@ struct COMMAND_STRUCT SCRIPT_Subcommands[] = { /* SENTINEL CKQUORUM argument table */ struct COMMAND_ARG SENTINEL_CKQUORUM_Args[] = { -{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; /********** SENTINEL CONFIG ********************/ @@ -5463,7 +5489,7 @@ struct COMMAND_ARG SENTINEL_DEBUG_Args[] = { /* SENTINEL FAILOVER argument table */ struct COMMAND_ARG SENTINEL_FAILOVER_Args[] = { -{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; /********** SENTINEL FLUSHCONFIG ********************/ @@ -5502,7 +5528,7 @@ struct COMMAND_ARG SENTINEL_FAILOVER_Args[] = { /* SENTINEL GET_MASTER_ADDR_BY_NAME argument table */ struct COMMAND_ARG SENTINEL_GET_MASTER_ADDR_BY_NAME_Args[] = { -{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; /********** SENTINEL HELP ********************/ @@ -5588,7 +5614,7 @@ struct COMMAND_ARG SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args[] = { /* SENTINEL MASTER argument table */ struct COMMAND_ARG SENTINEL_MASTER_Args[] = { -{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; /********** SENTINEL MASTERS ********************/ @@ -5686,7 +5712,7 @@ struct COMMAND_ARG SENTINEL_MONITOR_Args[] = { /* SENTINEL REMOVE argument table */ struct COMMAND_ARG SENTINEL_REMOVE_Args[] = { 
-{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; /********** SENTINEL REPLICAS ********************/ @@ -5708,7 +5734,7 @@ struct COMMAND_ARG SENTINEL_REMOVE_Args[] = { /* SENTINEL REPLICAS argument table */ struct COMMAND_ARG SENTINEL_REPLICAS_Args[] = { -{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; /********** SENTINEL RESET ********************/ @@ -5752,7 +5778,7 @@ struct COMMAND_ARG SENTINEL_RESET_Args[] = { /* SENTINEL SENTINELS argument table */ struct COMMAND_ARG SENTINEL_SENTINELS_Args[] = { -{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; /********** SENTINEL SET ********************/ @@ -5780,7 +5806,7 @@ struct COMMAND_ARG SENTINEL_SET_data_Subargs[] = { /* SENTINEL SET argument table */ struct COMMAND_ARG SENTINEL_SET_Args[] = { -{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, {MAKE_ARG("data",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,2,NULL),.subargs=SENTINEL_SET_data_Subargs}, }; @@ -5832,7 +5858,7 @@ struct COMMAND_ARG SENTINEL_SIMULATE_FAILURE_Args[] = { /* SENTINEL SLAVES argument table */ struct COMMAND_ARG SENTINEL_SLAVES_Args[] = { -{MAKE_ARG("master-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; /* SENTINEL command table */ @@ -5842,20 +5868,20 @@ struct COMMAND_STRUCT SENTINEL_Subcommands[] = { {MAKE_CMD("debug","Lists or updates the current configurable parameters of Sentinel.","O(N) where N is the number of configurable 
parameters","7.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_DEBUG_History,0,SENTINEL_DEBUG_Tips,0,sentinelCommand,-2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_DEBUG_Keyspecs,0,NULL,1),.args=SENTINEL_DEBUG_Args}, {MAKE_CMD("failover","Forces a Sentinel failover.",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_FAILOVER_History,0,SENTINEL_FAILOVER_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_FAILOVER_Keyspecs,0,NULL,1),.args=SENTINEL_FAILOVER_Args}, {MAKE_CMD("flushconfig","Rewrites the Sentinel configuration file.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_FLUSHCONFIG_History,0,SENTINEL_FLUSHCONFIG_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_FLUSHCONFIG_Keyspecs,0,NULL,0)}, -{MAKE_CMD("get-master-addr-by-name","Returns the port and address of a master instance.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_GET_MASTER_ADDR_BY_NAME_History,0,SENTINEL_GET_MASTER_ADDR_BY_NAME_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_GET_MASTER_ADDR_BY_NAME_Keyspecs,0,NULL,1),.args=SENTINEL_GET_MASTER_ADDR_BY_NAME_Args}, +{MAKE_CMD("get-master-addr-by-name","Returns the port and address of a primary instance.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_GET_MASTER_ADDR_BY_NAME_History,0,SENTINEL_GET_MASTER_ADDR_BY_NAME_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_GET_MASTER_ADDR_BY_NAME_Keyspecs,0,NULL,1),.args=SENTINEL_GET_MASTER_ADDR_BY_NAME_Args}, {MAKE_CMD("help","Returns helpful text about the different subcommands.","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_HELP_History,0,SENTINEL_HELP_Tips,0,sentinelCommand,2,CMD_LOADING|CMD_STALE|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_HELP_Keyspecs,0,NULL,0)}, {MAKE_CMD("info-cache","Returns the 
cached `INFO` replies from the deployment's instances.","O(N) where N is the number of instances","3.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_INFO_CACHE_History,0,SENTINEL_INFO_CACHE_Tips,0,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_INFO_CACHE_Keyspecs,0,NULL,1),.args=SENTINEL_INFO_CACHE_Args}, -{MAKE_CMD("is-master-down-by-addr","Determines whether a master instance is down.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_IS_MASTER_DOWN_BY_ADDR_History,0,SENTINEL_IS_MASTER_DOWN_BY_ADDR_Tips,0,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_IS_MASTER_DOWN_BY_ADDR_Keyspecs,0,NULL,4),.args=SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args}, -{MAKE_CMD("master","Returns the state of a master instance.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MASTER_History,0,SENTINEL_MASTER_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MASTER_Keyspecs,0,NULL,1),.args=SENTINEL_MASTER_Args}, -{MAKE_CMD("masters","Returns a list of monitored masters.","O(N) where N is the number of masters","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MASTERS_History,0,SENTINEL_MASTERS_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MASTERS_Keyspecs,0,NULL,0)}, +{MAKE_CMD("is-master-down-by-addr","Determines whether a primary instance is down.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_IS_MASTER_DOWN_BY_ADDR_History,0,SENTINEL_IS_MASTER_DOWN_BY_ADDR_Tips,0,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_IS_MASTER_DOWN_BY_ADDR_Keyspecs,0,NULL,4),.args=SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args}, +{MAKE_CMD("master","Returns the state of a primary 
instance.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MASTER_History,0,SENTINEL_MASTER_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MASTER_Keyspecs,0,NULL,1),.args=SENTINEL_MASTER_Args}, +{MAKE_CMD("masters","Returns a list of monitored primaries.","O(N) where N is the number of primaries","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MASTERS_History,0,SENTINEL_MASTERS_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MASTERS_Keyspecs,0,NULL,0)}, {MAKE_CMD("monitor","Starts monitoring.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MONITOR_History,0,SENTINEL_MONITOR_Tips,0,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MONITOR_Keyspecs,0,NULL,4),.args=SENTINEL_MONITOR_Args}, {MAKE_CMD("myid","Returns the Sentinel instance ID.","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MYID_History,0,SENTINEL_MYID_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MYID_Keyspecs,0,NULL,0)}, {MAKE_CMD("pending-scripts","Returns information about pending scripts for Sentinel.",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_PENDING_SCRIPTS_History,0,SENTINEL_PENDING_SCRIPTS_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_PENDING_SCRIPTS_Keyspecs,0,NULL,0)}, {MAKE_CMD("remove","Stops monitoring.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_REMOVE_History,0,SENTINEL_REMOVE_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_REMOVE_Keyspecs,0,NULL,1),.args=SENTINEL_REMOVE_Args}, {MAKE_CMD("replicas","Returns a list of the monitored replicas.","O(N) where N is the number of 
replicas","5.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_REPLICAS_History,0,SENTINEL_REPLICAS_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_REPLICAS_Keyspecs,0,NULL,1),.args=SENTINEL_REPLICAS_Args}, -{MAKE_CMD("reset","Resets masters by name matching a pattern.","O(N) where N is the number of monitored masters","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_RESET_History,0,SENTINEL_RESET_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_RESET_Keyspecs,0,NULL,1),.args=SENTINEL_RESET_Args}, +{MAKE_CMD("reset","Resets primaries by name matching a pattern.","O(N) where N is the number of monitored primaries","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_RESET_History,0,SENTINEL_RESET_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_RESET_Keyspecs,0,NULL,1),.args=SENTINEL_RESET_Args}, {MAKE_CMD("sentinels","Returns a list of Sentinel instances.","O(N) where N is the number of Sentinels","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_SENTINELS_History,0,SENTINEL_SENTINELS_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_SENTINELS_Keyspecs,0,NULL,1),.args=SENTINEL_SENTINELS_Args}, -{MAKE_CMD("set","Changes the configuration of a monitored master.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_SET_History,0,SENTINEL_SET_Tips,0,sentinelCommand,-5,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_SET_Keyspecs,0,NULL,2),.args=SENTINEL_SET_Args}, +{MAKE_CMD("set","Changes the configuration of a monitored primary.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_SET_History,0,SENTINEL_SET_Tips,0,sentinelCommand,-5,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_SET_Keyspecs,0,NULL,2),.args=SENTINEL_SET_Args}, {MAKE_CMD("simulate-failure","Simulates failover 
scenarios.",NULL,"3.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_SIMULATE_FAILURE_History,0,SENTINEL_SIMULATE_FAILURE_Tips,0,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_SIMULATE_FAILURE_Keyspecs,0,NULL,1),.args=SENTINEL_SIMULATE_FAILURE_Args}, {MAKE_CMD("slaves","Returns a list of the monitored replicas.","O(N) where N is the number of replicas.","2.8.0",CMD_DOC_DEPRECATED,"`SENTINEL REPLICAS`","5.0.0","sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_SLAVES_History,0,SENTINEL_SLAVES_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_SLAVES_Keyspecs,0,NULL,1),.args=SENTINEL_SLAVES_Args}, {0} @@ -10717,7 +10743,7 @@ struct COMMAND_STRUCT serverCommandTable[] = { {MAKE_CMD("type","Determines the type of value stored at a key.","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,TYPE_History,0,TYPE_Tips,0,typeCommand,2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_KEYSPACE,TYPE_Keyspecs,1,NULL,1),.args=TYPE_Args}, {MAKE_CMD("unlink","Asynchronously deletes one or more keys.","O(1) for each key removed regardless of its size. 
Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects where composed of.","4.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,UNLINK_History,0,UNLINK_Tips,2,unlinkCommand,-2,CMD_WRITE|CMD_FAST,ACL_CATEGORY_KEYSPACE,UNLINK_Keyspecs,1,NULL,1),.args=UNLINK_Args}, {MAKE_CMD("wait","Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAIT_History,0,WAIT_Tips,2,waitCommand,3,CMD_BLOCKING,ACL_CATEGORY_CONNECTION,WAIT_Keyspecs,0,NULL,2),.args=WAIT_Args}, -{MAKE_CMD("waitaof","Blocks until all of the preceding write commands sent by the connection are written to the append-only file of the master and/or replicas.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAITAOF_History,0,WAITAOF_Tips,2,waitaofCommand,4,CMD_BLOCKING,ACL_CATEGORY_CONNECTION,WAITAOF_Keyspecs,0,NULL,3),.args=WAITAOF_Args}, +{MAKE_CMD("waitaof","Blocks until all of the preceding write commands sent by the connection are written to the append-only file of the primary and/or replicas.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAITAOF_History,0,WAITAOF_Tips,2,waitaofCommand,4,CMD_BLOCKING,ACL_CATEGORY_CONNECTION,WAITAOF_Keyspecs,0,NULL,3),.args=WAITAOF_Args}, /* geo */ {MAKE_CMD("geoadd","Adds one or more members to a geospatial index. 
The key is created if it doesn't exist.","O(log(N)) for each item added, where N is the number of elements in the sorted set.","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEOADD_History,1,GEOADD_Tips,0,geoaddCommand,-5,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_GEO,GEOADD_Keyspecs,1,NULL,4),.args=GEOADD_Args}, {MAKE_CMD("geodist","Returns the distance between two members of a geospatial index.","O(1)","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEODIST_History,0,GEODIST_Tips,0,geodistCommand,-4,CMD_READONLY,ACL_CATEGORY_GEO,GEODIST_Keyspecs,1,NULL,4),.args=GEODIST_Args}, @@ -10816,12 +10842,12 @@ struct COMMAND_STRUCT serverCommandTable[] = { {MAKE_CMD("monitor","Listens for all requests received by the server in real-time.",NULL,"1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,MONITOR_History,0,MONITOR_Tips,0,monitorCommand,1,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE,0,MONITOR_Keyspecs,0,NULL,0)}, {MAKE_CMD("psync","An internal command used in replication.",NULL,"2.8.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,PSYNC_History,0,PSYNC_Tips,0,syncCommand,-3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NO_MULTI|CMD_NOSCRIPT,0,PSYNC_Keyspecs,0,NULL,2),.args=PSYNC_Args}, {MAKE_CMD("replconf","An internal command for configuring the replication stream.","O(1)","3.0.0",CMD_DOC_SYSCMD,NULL,NULL,"server",COMMAND_GROUP_SERVER,REPLCONF_History,0,REPLCONF_Tips,0,replconfCommand,-1,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_ALLOW_BUSY,0,REPLCONF_Keyspecs,0,NULL,0)}, -{MAKE_CMD("replicaof","Configures a server as replica of another, or promotes it to a master.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,REPLICAOF_History,0,REPLICAOF_Tips,0,replicaofCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_STALE,0,REPLICAOF_Keyspecs,0,NULL,1),.args=REPLICAOF_Args}, +{MAKE_CMD("replicaof","Configures a server as replica of another, or promotes it to a 
primary.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,REPLICAOF_History,0,REPLICAOF_Tips,0,replicaofCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_STALE,0,REPLICAOF_Keyspecs,0,NULL,1),.args=REPLICAOF_Args}, {MAKE_CMD("restore-asking","An internal command for migrating keys in a cluster.","O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N)).","3.0.0",CMD_DOC_SYSCMD,NULL,NULL,"server",COMMAND_GROUP_SERVER,RESTORE_ASKING_History,3,RESTORE_ASKING_Tips,0,restoreCommand,-4,CMD_WRITE|CMD_DENYOOM|CMD_ASKING,ACL_CATEGORY_KEYSPACE|ACL_CATEGORY_DANGEROUS,RESTORE_ASKING_Keyspecs,1,NULL,7),.args=RESTORE_ASKING_Args}, {MAKE_CMD("role","Returns the replication role.","O(1)","2.8.12",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,ROLE_History,0,ROLE_Tips,0,roleCommand,1,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_FAST|CMD_SENTINEL,ACL_CATEGORY_ADMIN|ACL_CATEGORY_DANGEROUS,ROLE_Keyspecs,0,NULL,0)}, {MAKE_CMD("save","Synchronously saves the database(s) to disk.","O(N) where N is the total number of keys in all databases","1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SAVE_History,0,SAVE_Tips,0,saveCommand,1,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_NO_MULTI,0,SAVE_Keyspecs,0,NULL,0)}, {MAKE_CMD("shutdown","Synchronously saves the database(s) to disk and shuts down the server.","O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise 
O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SHUTDOWN_History,1,SHUTDOWN_Tips,0,shutdownCommand,-1,CMD_ADMIN|CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_NO_MULTI|CMD_SENTINEL|CMD_ALLOW_BUSY,0,SHUTDOWN_Keyspecs,0,NULL,1),.args=SHUTDOWN_Args}, -{MAKE_CMD("slaveof","Sets a server as a replica of another, or promotes it to being a master.","O(1)","1.0.0",CMD_DOC_DEPRECATED,"`REPLICAOF`","5.0.0","server",COMMAND_GROUP_SERVER,SLAVEOF_History,0,SLAVEOF_Tips,0,replicaofCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_STALE,0,SLAVEOF_Keyspecs,0,NULL,1),.args=SLAVEOF_Args}, +{MAKE_CMD("slaveof","Sets a server as a replica of another, or promotes it to being a primary.","O(1)","1.0.0",CMD_DOC_DEPRECATED,"`REPLICAOF`","5.0.0","server",COMMAND_GROUP_SERVER,SLAVEOF_History,0,SLAVEOF_Tips,0,replicaofCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NOSCRIPT|CMD_STALE,0,SLAVEOF_Keyspecs,0,NULL,1),.args=SLAVEOF_Args}, {MAKE_CMD("slowlog","A container for slow log commands.","Depends on subcommand.","2.2.12",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SLOWLOG_History,0,SLOWLOG_Tips,0,NULL,-2,0,0,SLOWLOG_Keyspecs,0,NULL,0),.subcommands=SLOWLOG_Subcommands}, {MAKE_CMD("swapdb","Swaps two databases.","O(N) where N is the count of clients watching or blocking on keys from both databases.","4.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SWAPDB_History,0,SWAPDB_Tips,0,swapdbCommand,3,CMD_WRITE|CMD_FAST,ACL_CATEGORY_KEYSPACE|ACL_CATEGORY_DANGEROUS,SWAPDB_Keyspecs,0,NULL,2),.args=SWAPDB_Args}, {MAKE_CMD("sync","An internal command used in replication.",NULL,"1.0.0",CMD_DOC_NONE,NULL,NULL,"server",COMMAND_GROUP_SERVER,SYNC_History,0,SYNC_Tips,0,syncCommand,1,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_NO_MULTI|CMD_NOSCRIPT,0,SYNC_Keyspecs,0,NULL,0)}, diff --git a/src/commands/client-kill.json b/src/commands/client-kill.json index 01079ad993..97fa932cd8 100644 --- a/src/commands/client-kill.json +++ b/src/commands/client-kill.json @@ -31,6 +31,10 @@ [ 
"8.0.0", "`MAXAGE` option." + ], + [ + "8.0.0", + "Replaced `master` `TYPE` with `primary`. `master` still supported for backward compatibility." ] ], "command_flags": [ @@ -84,6 +88,12 @@ "token": "master", "since": "3.2.0" }, + { + "name": "primary", + "type": "pure-token", + "token": "primary", + "since": "8.0.0" + }, { "name": "slave", "type": "pure-token", diff --git a/src/commands/client-list.json b/src/commands/client-list.json index f72ffaf40a..d9c0054e60 100644 --- a/src/commands/client-list.json +++ b/src/commands/client-list.json @@ -31,6 +31,10 @@ [ "7.0.3", "Added `ssub` field." + ], + [ + "8.0.0", + "Replaced `master` `TYPE` with `primary`. `master` still supported for backward compatibility." ] ], "command_flags": [ diff --git a/src/commands/cluster-failover.json b/src/commands/cluster-failover.json index f58fd562a7..9b31e310eb 100644 --- a/src/commands/cluster-failover.json +++ b/src/commands/cluster-failover.json @@ -1,6 +1,6 @@ { "FAILOVER": { - "summary": "Forces a replica to perform a manual failover of its master.", + "summary": "Forces a replica to perform a manual failover of its primary.", "complexity": "O(1)", "group": "cluster", "since": "3.0.0", diff --git a/src/commands/cluster-info.json b/src/commands/cluster-info.json index 2c88760eb7..023d9b46bb 100644 --- a/src/commands/cluster-info.json +++ b/src/commands/cluster-info.json @@ -8,6 +8,7 @@ "container": "CLUSTER", "function": "clusterCommand", "command_flags": [ + "LOADING", "STALE" ], "command_tips": [ diff --git a/src/commands/cluster-myid.json b/src/commands/cluster-myid.json index caa62de756..4ef1ff7de9 100644 --- a/src/commands/cluster-myid.json +++ b/src/commands/cluster-myid.json @@ -8,6 +8,7 @@ "container": "CLUSTER", "function": "clusterCommand", "command_flags": [ + "LOADING", "STALE" ], "reply_schema": { diff --git a/src/commands/cluster-myshardid.json b/src/commands/cluster-myshardid.json index 01c05ba926..0e08417eec 100644 --- a/src/commands/cluster-myshardid.json +++ 
b/src/commands/cluster-myshardid.json @@ -8,6 +8,7 @@ "container": "CLUSTER", "function": "clusterCommand", "command_flags": [ + "LOADING", "STALE" ], "command_tips": [ diff --git a/src/commands/cluster-nodes.json b/src/commands/cluster-nodes.json index 9c5fcbe9a4..e12bca36b2 100644 --- a/src/commands/cluster-nodes.json +++ b/src/commands/cluster-nodes.json @@ -8,6 +8,7 @@ "container": "CLUSTER", "function": "clusterCommand", "command_flags": [ + "LOADING", "STALE" ], "command_tips": [ diff --git a/src/commands/cluster-replicas.json b/src/commands/cluster-replicas.json index 4e8bd4204c..2fb47afea4 100644 --- a/src/commands/cluster-replicas.json +++ b/src/commands/cluster-replicas.json @@ -1,6 +1,6 @@ { "REPLICAS": { - "summary": "Lists the replica nodes of a master node.", + "summary": "Lists the replica nodes of a primary node.", "complexity": "O(N) where N is the number of replicas.", "group": "cluster", "since": "5.0.0", @@ -21,7 +21,7 @@ } ], "reply_schema": { - "description": "A list of replica nodes replicating from the specified master node provided in the same format used by CLUSTER NODES.", + "description": "A list of replica nodes replicating from the specified primary node provided in the same format used by CLUSTER NODES.", "type": "array", "items": { "type": "string", diff --git a/src/commands/cluster-replicate.json b/src/commands/cluster-replicate.json index 060d4af190..857a8022b8 100644 --- a/src/commands/cluster-replicate.json +++ b/src/commands/cluster-replicate.json @@ -1,6 +1,6 @@ { "REPLICATE": { - "summary": "Configure a node as replica of a master node.", + "summary": "Configure a node as replica of a primary node.", "complexity": "O(1)", "group": "cluster", "since": "3.0.0", diff --git a/src/commands/cluster-slaves.json b/src/commands/cluster-slaves.json index db66a1c1db..7059e544bb 100644 --- a/src/commands/cluster-slaves.json +++ b/src/commands/cluster-slaves.json @@ -1,6 +1,6 @@ { "SLAVES": { - "summary": "Lists the replica nodes of a 
master node.", + "summary": "Lists the replica nodes of a primary node.", "complexity": "O(N) where N is the number of replicas.", "group": "cluster", "since": "3.0.0", @@ -26,7 +26,7 @@ } ], "reply_schema": { - "description": "A list of replica nodes replicating from the specified master node provided in the same format used by CLUSTER NODES.", + "description": "A list of replica nodes replicating from the specified primary node provided in the same format used by CLUSTER NODES.", "type": "array", "items": { "type": "string", diff --git a/src/commands/cluster-slots.json b/src/commands/cluster-slots.json index ca48f371ea..5d00280f15 100644 --- a/src/commands/cluster-slots.json +++ b/src/commands/cluster-slots.json @@ -42,7 +42,7 @@ }, { "type": "array", - "description": "Master node for the slot range.", + "description": "Primary node for the slot range.", "minItems": 4, "maxItems": 4, "items": [ diff --git a/src/commands/replicaof.json b/src/commands/replicaof.json index 6ddedf2d68..cd5102171c 100644 --- a/src/commands/replicaof.json +++ b/src/commands/replicaof.json @@ -1,6 +1,6 @@ { "REPLICAOF": { - "summary": "Configures a server as replica of another, or promotes it to a master.", + "summary": "Configures a server as replica of another, or promotes it to a primary.", "complexity": "O(1)", "group": "server", "since": "5.0.0", diff --git a/src/commands/role.json b/src/commands/role.json index 1c3a4490ca..d31396faf6 100644 --- a/src/commands/role.json +++ b/src/commands/role.json @@ -28,7 +28,7 @@ "const": "master" }, { - "description": "Current replication master offset.", + "description": "Current replication primary offset.", "type": "integer" }, { @@ -65,18 +65,18 @@ "const": "slave" }, { - "description": "IP of master.", + "description": "IP of primary.", "type": "string" }, { - "description": "Port number of master.", + "description": "Port number of primary.", "type": "integer" }, { - "description": "State of the replication from the point of view of the 
master.", + "description": "State of the replication from the point of view of the primary.", "oneOf": [ { - "description": "The instance is in handshake with its master.", + "description": "The instance is in handshake with its primary.", "const": "handshake" }, { @@ -84,15 +84,15 @@ "const": "none" }, { - "description": "The instance needs to connect to its master.", + "description": "The instance needs to connect to its primary.", "const": "connect" }, { - "description": "The master-replica connection is in progress.", + "description": "The primary-replica connection is in progress.", "const": "connecting" }, { - "description": "The master and replica are trying to perform the synchronization.", + "description": "The primary and replica are trying to perform the synchronization.", "const": "sync" }, { @@ -106,7 +106,7 @@ ] }, { - "description": "The amount of data received from the replica so far in terms of master replication offset.", + "description": "The amount of data received from the replica so far in terms of primary replication offset.", "type": "integer" } ] @@ -120,7 +120,7 @@ "const": "sentinel" }, { - "description": "List of master names monitored by this sentinel instance.", + "description": "List of primary names monitored by this sentinel instance.", "type": "array", "items": { "type": "string" diff --git a/src/commands/script-show.json b/src/commands/script-show.json new file mode 100644 index 0000000000..f22fa29675 --- /dev/null +++ b/src/commands/script-show.json @@ -0,0 +1,27 @@ +{ + "SHOW": { + "summary": "Show server-side Lua script in the script cache.", + "complexity": "O(1).", + "group": "scripting", + "since": "8.0.0", + "arity": 3, + "container": "SCRIPT", + "function": "scriptCommand", + "command_flags": [ + "NOSCRIPT" + ], + "acl_categories": [ + "SCRIPTING" + ], + "arguments": [ + { + "name": "sha1", + "type": "string" + } + ], + "reply_schema": { + "description": "Lua script if sha1 hash exists in script cache.", + "type": "string" 
+ } + } +} diff --git a/src/commands/sentinel-ckquorum.json b/src/commands/sentinel-ckquorum.json index e79132303f..7f4428c3fe 100644 --- a/src/commands/sentinel-ckquorum.json +++ b/src/commands/sentinel-ckquorum.json @@ -13,12 +13,12 @@ ], "reply_schema": { "type": "string", - "description": "Returns OK if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the majority needed to authorize the failover.", + "description": "Returns OK if the current Sentinel configuration is able to reach the quorum needed to failover a primary, and the majority needed to authorize the failover.", "pattern": "OK" }, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" } ] diff --git a/src/commands/sentinel-failover.json b/src/commands/sentinel-failover.json index 8a211990f2..8e7c3ea3e7 100644 --- a/src/commands/sentinel-failover.json +++ b/src/commands/sentinel-failover.json @@ -13,11 +13,11 @@ ], "reply_schema": { "const": "OK", - "description": "Force a fail over as if the master was not reachable, and without asking for agreement to other Sentinels." + "description": "Force a fail over as if the primary was not reachable, and without asking for agreement to other Sentinels." 
}, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" } ] diff --git a/src/commands/sentinel-get-master-addr-by-name.json b/src/commands/sentinel-get-master-addr-by-name.json index 1bcbec5341..2d7fc50eda 100644 --- a/src/commands/sentinel-get-master-addr-by-name.json +++ b/src/commands/sentinel-get-master-addr-by-name.json @@ -1,6 +1,6 @@ { "GET-MASTER-ADDR-BY-NAME": { - "summary": "Returns the port and address of a master instance.", + "summary": "Returns the port and address of a primary instance.", "complexity": "O(1)", "group": "sentinel", "since": "2.8.4", @@ -30,7 +30,7 @@ }, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" } ] diff --git a/src/commands/sentinel-info-cache.json b/src/commands/sentinel-info-cache.json index af89f182ea..44edcf35e3 100644 --- a/src/commands/sentinel-info-cache.json +++ b/src/commands/sentinel-info-cache.json @@ -14,7 +14,7 @@ ], "reply_schema": { "type": "array", - "description": "This is actually a map, the odd entries are a master name, and the even entries are the last cached INFO output from that master and all its replicas.", + "description": "This is actually a map, the odd entries are a primary name, and the even entries are the last cached INFO output from that primary and all its replicas.", "minItems": 0, "maxItems": 4294967295, "items": [ @@ -22,11 +22,11 @@ "oneOf": [ { "type": "string", - "description": "The master name." + "description": "The primary name." }, { "type": "array", - "description": "This is an array of pairs, the odd entries are the INFO age, and the even entries are the cached INFO string. The first pair belong to the master and the rest are its replicas.", + "description": "This is an array of pairs, the odd entries are the INFO age, and the even entries are the cached INFO string. 
The first pair belong to the primary and the rest are its replicas.", "minItems": 2, "maxItems": 2, "items": [ diff --git a/src/commands/sentinel-is-master-down-by-addr.json b/src/commands/sentinel-is-master-down-by-addr.json index fd7698014c..3ecf8723fb 100644 --- a/src/commands/sentinel-is-master-down-by-addr.json +++ b/src/commands/sentinel-is-master-down-by-addr.json @@ -1,6 +1,6 @@ { "IS-MASTER-DOWN-BY-ADDR": { - "summary": "Determines whether a master instance is down.", + "summary": "Determines whether a primary instance is down.", "complexity": "O(1)", "group": "sentinel", "since": "2.8.4", @@ -21,11 +21,11 @@ "oneOf": [ { "const": 0, - "description": "Master is up." + "description": "Primary is up." }, { "const": 1, - "description": "Master is down." + "description": "Primary is down." } ] }, diff --git a/src/commands/sentinel-master.json b/src/commands/sentinel-master.json index ff94617aeb..3af3227394 100644 --- a/src/commands/sentinel-master.json +++ b/src/commands/sentinel-master.json @@ -1,6 +1,6 @@ { "MASTER": { - "summary": "Returns the state of a master instance.", + "summary": "Returns the state of a primary instance.", "complexity": "O(1)", "group": "sentinel", "since": "2.8.4", @@ -14,14 +14,14 @@ ], "reply_schema": { "type": "object", - "description": "The state and info of the specified master.", + "description": "The state and info of the specified primary.", "additionalProperties": { "type": "string" } }, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" } ] diff --git a/src/commands/sentinel-masters.json b/src/commands/sentinel-masters.json index 26992585a1..b6aa86d02a 100644 --- a/src/commands/sentinel-masters.json +++ b/src/commands/sentinel-masters.json @@ -1,7 +1,7 @@ { "MASTERS": { - "summary": "Returns a list of monitored masters.", - "complexity": "O(N) where N is the number of masters", + "summary": "Returns a list of monitored primaries.", + "complexity": "O(N) where N is the number of primaries", 
"group": "sentinel", "since": "2.8.4", "arity": 2, @@ -14,7 +14,7 @@ ], "reply_schema": { "type": "array", - "description": "List of monitored masters, and their state.", + "description": "List of monitored primaries, and their state.", "items": { "type": "object", "additionalProperties": { diff --git a/src/commands/sentinel-remove.json b/src/commands/sentinel-remove.json index 1fe084f42c..7d545c3715 100644 --- a/src/commands/sentinel-remove.json +++ b/src/commands/sentinel-remove.json @@ -17,7 +17,7 @@ }, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" } ] diff --git a/src/commands/sentinel-replicas.json b/src/commands/sentinel-replicas.json index 32b04e994a..a81ed0ef00 100644 --- a/src/commands/sentinel-replicas.json +++ b/src/commands/sentinel-replicas.json @@ -14,7 +14,7 @@ ], "reply_schema": { "type": "array", - "description": "List of replicas for this master, and their state.", + "description": "List of replicas for this primary, and their state.", "items": { "type": "object", "additionalProperties": { @@ -24,7 +24,7 @@ }, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" } ] diff --git a/src/commands/sentinel-reset.json b/src/commands/sentinel-reset.json index 5d2a63f3d5..35153609cb 100644 --- a/src/commands/sentinel-reset.json +++ b/src/commands/sentinel-reset.json @@ -1,7 +1,7 @@ { "RESET": { - "summary": "Resets masters by name matching a pattern.", - "complexity": "O(N) where N is the number of monitored masters", + "summary": "Resets primaries by name matching a pattern.", + "complexity": "O(N) where N is the number of monitored primaries", "group": "sentinel", "since": "2.8.4", "arity": 3, @@ -14,7 +14,7 @@ ], "reply_schema": { "type": "integer", - "description": "The number of masters that were reset." + "description": "The number of primaries that were reset." 
}, "arguments": [ { diff --git a/src/commands/sentinel-sentinels.json b/src/commands/sentinel-sentinels.json index fdaa5cb992..dae12c5a9b 100644 --- a/src/commands/sentinel-sentinels.json +++ b/src/commands/sentinel-sentinels.json @@ -24,7 +24,7 @@ }, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" } ] diff --git a/src/commands/sentinel-set.json b/src/commands/sentinel-set.json index abca33b89a..43523e6d6b 100644 --- a/src/commands/sentinel-set.json +++ b/src/commands/sentinel-set.json @@ -1,6 +1,6 @@ { "SET": { - "summary": "Changes the configuration of a monitored master.", + "summary": "Changes the configuration of a monitored primary.", "complexity": "O(1)", "group": "sentinel", "since": "2.8.4", @@ -17,7 +17,7 @@ }, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" }, { diff --git a/src/commands/sentinel-slaves.json b/src/commands/sentinel-slaves.json index c1fec41bb2..9792270982 100644 --- a/src/commands/sentinel-slaves.json +++ b/src/commands/sentinel-slaves.json @@ -29,7 +29,7 @@ }, "arguments": [ { - "name": "master-name", + "name": "primary-name", "type": "string" } ] diff --git a/src/commands/slaveof.json b/src/commands/slaveof.json index ca30982887..509bdfbee3 100644 --- a/src/commands/slaveof.json +++ b/src/commands/slaveof.json @@ -1,6 +1,6 @@ { "SLAVEOF": { - "summary": "Sets a server as a replica of another, or promotes it to being a master.", + "summary": "Sets a server as a replica of another, or promotes it to being a primary.", "complexity": "O(1)", "group": "server", "since": "1.0.0", diff --git a/src/commands/waitaof.json b/src/commands/waitaof.json index 19b514c274..d664000b5f 100644 --- a/src/commands/waitaof.json +++ b/src/commands/waitaof.json @@ -1,6 +1,6 @@ { "WAITAOF": { - "summary": "Blocks until all of the preceding write commands sent by the connection are written to the append-only file of the master and/or replicas.", + "summary": "Blocks until all of the 
preceding write commands sent by the connection are written to the append-only file of the primary and/or replicas.", "complexity": "O(1)", "group": "generic", "since": "7.2.0", diff --git a/src/config.c b/src/config.c index 58b976ff0d..a9d650539b 100644 --- a/src/config.c +++ b/src/config.c @@ -124,7 +124,7 @@ configEnum propagation_error_behavior_enum[] = {{"ignore", PROPAGATION_ERR_BEHAV /* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = { {0, 0, 0}, /* normal */ - {1024 * 1024 * 256, 1024 * 1024 * 64, 60}, /* slave */ + {1024 * 1024 * 256, 1024 * 1024 * 64, 60}, /* replica */ {1024 * 1024 * 32, 1024 * 1024 * 8, 60} /* pubsub */ }; @@ -373,7 +373,7 @@ static int updateClientOutputBufferLimit(sds *args, int arg_len, const char **er * error in a single client class is present. */ for (j = 0; j < arg_len; j += 4) { class = getClientTypeByName(args[j]); - if (class == -1 || class == CLIENT_TYPE_MASTER) { + if (class == -1 || class == CLIENT_TYPE_PRIMARY) { if (err) *err = "Invalid client class specified in " "buffer limit configuration."; @@ -574,7 +574,7 @@ void loadServerConfigFromString(char *config) { } /* Sanity checks. 
*/ - if (server.cluster_enabled && server.masterhost) { + if (server.cluster_enabled && server.primary_host) { err = "replicaof directive not allowed in cluster mode"; goto loaderr; } @@ -986,7 +986,6 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state); dictType optionToLineDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor, /* val destructor */ @@ -996,7 +995,6 @@ dictType optionToLineDictType = { dictType optionSetDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -1369,11 +1367,11 @@ void rewriteConfigSaveOption(standardConfig *config, const char *name, struct re if (!server.saveparamslen) { rewriteConfigRewriteLine(state, name, sdsnew("save \"\""), 1); } else { + line = sdsnew(name); for (j = 0; j < server.saveparamslen; j++) { - line = sdscatprintf(sdsempty(), "save %ld %d", (long)server.saveparams[j].seconds, - server.saveparams[j].changes); - rewriteConfigRewriteLine(state, name, line, 1); + line = sdscatprintf(line, " %ld %d", (long)server.saveparams[j].seconds, server.saveparams[j].changes); } + rewriteConfigRewriteLine(state, name, line, 1); } /* Mark "save" as processed in case server.saveparamslen is zero. */ @@ -1424,19 +1422,19 @@ void rewriteConfigDirOption(standardConfig *config, const char *name, struct rew rewriteConfigStringOption(state, name, cwd, NULL); } -/* Rewrite the slaveof option. */ +/* Rewrite the replicaof option. */ void rewriteConfigReplicaOfOption(standardConfig *config, const char *name, struct rewriteConfigState *state) { UNUSED(config); sds line; - /* If this is a master, we want all the slaveof config options + /* If this is a primary, we want all the replicaof config options * in the file to be removed. 
Note that if this is a cluster instance - * we don't want a slaveof directive inside valkey.conf. */ - if (server.cluster_enabled || server.masterhost == NULL) { + * we don't want a replicaof directive inside valkey.conf. */ + if (server.cluster_enabled || server.primary_host == NULL) { rewriteConfigMarkAsProcessed(state, name); return; } - line = sdscatprintf(sdsempty(), "%s %s %d", name, server.masterhost, server.masterport); + line = sdscatprintf(sdsempty(), "%s %s %d", name, server.primary_host, server.primary_port); rewriteConfigRewriteLine(state, name, line, 1); } @@ -2297,7 +2295,19 @@ static int isValidActiveDefrag(int val, const char **err) { return 1; } +static int isValidClusterConfigFile(char *val, const char **err) { + if (!strcmp(val, "")) { + *err = "cluster-config-file can't be empty"; + return 0; + } + return 1; +} + static int isValidDBfilename(char *val, const char **err) { + if (!strcmp(val, "")) { + *err = "dbfilename can't be empty"; + return 0; + } if (!pathIsBaseName(val)) { *err = "dbfilename can't be a path, just a filename"; return 0; @@ -2454,9 +2464,9 @@ static int updateMaxmemory(const char **err) { return 1; } -static int updateGoodSlaves(const char **err) { +static int updateGoodReplicas(const char **err) { UNUSED(err); - refreshGoodSlavesCount(); + refreshGoodReplicasCount(); return 1; } @@ -2660,6 +2670,10 @@ static int setConfigDirOption(standardConfig *config, sds *argv, int argc, const *err = "wrong number of arguments"; return 0; } + if (!strcmp(argv[0], "")) { + *err = "dir can't be empty"; + return 0; + } if (chdir(argv[0]) == -1) { *err = strerror(errno); return 0; @@ -2790,7 +2804,7 @@ static int setConfigOOMScoreAdjValuesOption(standardConfig *config, sds *argv, i * keep the configuration, which may still be valid for privileged processes. 
*/ - if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_MASTER] || + if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_PRIMARY] || values[CONFIG_OOM_BGCHILD] < values[CONFIG_OOM_REPLICA]) { serverLog(LL_WARNING, "The oom-score-adj-values configuration may not work for non-privileged processes! " "Please consult the documentation."); @@ -2869,18 +2883,18 @@ static int setConfigReplicaOfOption(standardConfig *config, sds *argv, int argc, return 0; } - sdsfree(server.masterhost); - server.masterhost = NULL; + sdsfree(server.primary_host); + server.primary_host = NULL; if (!strcasecmp(argv[0], "no") && !strcasecmp(argv[1], "one")) { return 1; } char *ptr; - server.masterport = strtol(argv[1], &ptr, 10); - if (server.masterport < 0 || server.masterport > 65535 || *ptr != '\0') { - *err = "Invalid master port"; + server.primary_port = strtol(argv[1], &ptr, 10); + if (server.primary_port < 0 || server.primary_port > 65535 || *ptr != '\0') { + *err = "Invalid primary port"; return 0; } - server.masterhost = sdsnew(argv[0]); + server.primary_host = sdsnew(argv[0]); server.repl_state = REPL_STATE_CONNECT; return 1; } @@ -2893,8 +2907,8 @@ static sds getConfigBindOption(standardConfig *config) { static sds getConfigReplicaOfOption(standardConfig *config) { UNUSED(config); char buf[256]; - if (server.masterhost) - snprintf(buf, sizeof(buf), "%s %d", server.masterhost, server.masterport); + if (server.primary_host) + snprintf(buf, sizeof(buf), "%s %d", server.primary_host, server.primary_port); else buf[0] = '\0'; return sdsnew(buf); @@ -3032,11 +3046,11 @@ standardConfig static_configs[] = { createBoolConfig("aof-load-truncated", NULL, MODIFIABLE_CONFIG, server.aof_load_truncated, 1, NULL, NULL), createBoolConfig("aof-use-rdb-preamble", NULL, MODIFIABLE_CONFIG, server.aof_use_rdb_preamble, 1, NULL, NULL), createBoolConfig("aof-timestamp-enabled", NULL, MODIFIABLE_CONFIG, server.aof_timestamp_enabled, 0, NULL, NULL), - createBoolConfig("cluster-replica-no-failover", 
"cluster-slave-no-failover", MODIFIABLE_CONFIG, server.cluster_slave_no_failover, 0, NULL, updateClusterFlags), /* Failover by default. */ - createBoolConfig("replica-lazy-flush", "slave-lazy-flush", MODIFIABLE_CONFIG, server.repl_slave_lazy_flush, 0, NULL, NULL), + createBoolConfig("cluster-replica-no-failover", "cluster-slave-no-failover", MODIFIABLE_CONFIG, server.cluster_replica_no_failover, 0, NULL, updateClusterFlags), /* Failover by default. */ + createBoolConfig("replica-lazy-flush", "slave-lazy-flush", MODIFIABLE_CONFIG, server.repl_replica_lazy_flush, 0, NULL, NULL), createBoolConfig("replica-serve-stale-data", "slave-serve-stale-data", MODIFIABLE_CONFIG, server.repl_serve_stale_data, 1, NULL, NULL), - createBoolConfig("replica-read-only", "slave-read-only", DEBUG_CONFIG | MODIFIABLE_CONFIG, server.repl_slave_ro, 1, NULL, NULL), - createBoolConfig("replica-ignore-maxmemory", "slave-ignore-maxmemory", MODIFIABLE_CONFIG, server.repl_slave_ignore_maxmemory, 1, NULL, NULL), + createBoolConfig("replica-read-only", "slave-read-only", DEBUG_CONFIG | MODIFIABLE_CONFIG, server.repl_replica_ro, 1, NULL, NULL), + createBoolConfig("replica-ignore-maxmemory", "slave-ignore-maxmemory", MODIFIABLE_CONFIG, server.repl_replica_ignore_maxmemory, 1, NULL, NULL), createBoolConfig("jemalloc-bg-thread", NULL, MODIFIABLE_CONFIG, server.jemalloc_bg_thread, 1, NULL, updateJemallocBgThread), createBoolConfig("activedefrag", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.active_defrag_enabled, 0, isValidActiveDefrag, NULL), createBoolConfig("syslog-enabled", NULL, IMMUTABLE_CONFIG, server.syslog_enabled, 0, NULL, NULL), @@ -3060,10 +3074,10 @@ standardConfig static_configs[] = { createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL), createStringConfig("unixsocket", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocket, NULL, NULL, NULL), createStringConfig("pidfile", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, 
server.pidfile, NULL, NULL, NULL), - createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.slave_announce_ip, NULL, NULL, NULL), - createStringConfig("masteruser", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masteruser, NULL, NULL, NULL), + createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.replica_announce_ip, NULL, NULL, NULL), + createStringConfig("primaryuser", "masteruser", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_user, NULL, NULL, NULL), createStringConfig("cluster-announce-ip", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_ip, NULL, NULL, updateClusterIp), - createStringConfig("cluster-config-file", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.cluster_configfile, "nodes.conf", NULL, NULL), + createStringConfig("cluster-config-file", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.cluster_configfile, "nodes.conf", isValidClusterConfigFile, NULL), createStringConfig("cluster-announce-hostname", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_hostname, NULL, isValidAnnouncedHostname, updateClusterHostname), createStringConfig("cluster-announce-human-nodename", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_human_nodename, NULL, isValidAnnouncedNodename, updateClusterHumanNodename), createStringConfig("syslog-ident", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.syslog_ident, SERVER_NAME, NULL, NULL), @@ -3084,7 +3098,7 @@ standardConfig static_configs[] = { createStringConfig("locale-collate", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.locale_collate, "", NULL, updateLocaleCollate), /* SDS Configs */ - createSDSConfig("masterauth", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.masterauth, NULL, NULL, NULL), + createSDSConfig("primaryauth", "masterauth", 
MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_auth, NULL, NULL, NULL), createSDSConfig("requirepass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.requirepass, NULL, NULL, updateRequirePass), /* Enum Configs */ @@ -3110,7 +3124,7 @@ standardConfig static_configs[] = { createIntConfig("port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.port, 6379, INTEGER_CONFIG, NULL, updatePort), /* TCP port. */ createIntConfig("io-threads", NULL, DEBUG_CONFIG | IMMUTABLE_CONFIG, 1, 128, server.io_threads_num, 1, INTEGER_CONFIG, NULL, NULL), /* Single threaded by default */ createIntConfig("auto-aof-rewrite-percentage", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.aof_rewrite_perc, 100, INTEGER_CONFIG, NULL, NULL), - createIntConfig("cluster-replica-validity-factor", "cluster-slave-validity-factor", MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_slave_validity_factor, 10, INTEGER_CONFIG, NULL, NULL), /* Slave max data age factor. */ + createIntConfig("cluster-replica-validity-factor", "cluster-slave-validity-factor", MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_replica_validity_factor, 10, INTEGER_CONFIG, NULL, NULL), /* replica max data age factor. 
*/ createIntConfig("list-max-listpack-size", "list-max-ziplist-size", MODIFIABLE_CONFIG, INT_MIN, INT_MAX, server.list_max_listpack_size, -2, INTEGER_CONFIG, NULL, NULL), createIntConfig("tcp-keepalive", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tcpkeepalive, 300, INTEGER_CONFIG, NULL, NULL), createIntConfig("cluster-migration-barrier", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_migration_barrier, 1, INTEGER_CONFIG, NULL, NULL), @@ -3120,26 +3134,26 @@ standardConfig static_configs[] = { createIntConfig("active-defrag-threshold-upper", NULL, MODIFIABLE_CONFIG, 0, 1000, server.active_defrag_threshold_upper, 100, INTEGER_CONFIG, NULL, updateDefragConfiguration), /* Default: maximum defrag force at 100% fragmentation */ createIntConfig("lfu-log-factor", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_log_factor, 10, INTEGER_CONFIG, NULL, NULL), createIntConfig("lfu-decay-time", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_decay_time, 1, INTEGER_CONFIG, NULL, NULL), - createIntConfig("replica-priority", "slave-priority", MODIFIABLE_CONFIG, 0, INT_MAX, server.slave_priority, 100, INTEGER_CONFIG, NULL, NULL), + createIntConfig("replica-priority", "slave-priority", MODIFIABLE_CONFIG, 0, INT_MAX, server.replica_priority, 100, INTEGER_CONFIG, NULL, NULL), createIntConfig("repl-diskless-sync-delay", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_delay, 5, INTEGER_CONFIG, NULL, NULL), createIntConfig("maxmemory-samples", NULL, MODIFIABLE_CONFIG, 1, 64, server.maxmemory_samples, 5, INTEGER_CONFIG, NULL, NULL), createIntConfig("maxmemory-eviction-tenacity", NULL, MODIFIABLE_CONFIG, 0, 100, server.maxmemory_eviction_tenacity, 10, INTEGER_CONFIG, NULL, NULL), createIntConfig("timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.maxidletime, 0, INTEGER_CONFIG, NULL, NULL), /* Default client timeout: infinite */ - createIntConfig("replica-announce-port", "slave-announce-port", MODIFIABLE_CONFIG, 0, 65535, server.slave_announce_port, 0, INTEGER_CONFIG, 
NULL, NULL), + createIntConfig("replica-announce-port", "slave-announce-port", MODIFIABLE_CONFIG, 0, 65535, server.replica_announce_port, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("tcp-backlog", NULL, IMMUTABLE_CONFIG, 0, INT_MAX, server.tcp_backlog, 511, INTEGER_CONFIG, NULL, NULL), /* TCP listen backlog. */ createIntConfig("cluster-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.cluster_port, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("cluster-announce-bus-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_bus_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Default: Use +10000 offset. */ createIntConfig("cluster-announce-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.port */ createIntConfig("cluster-announce-tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_tls_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.tls_port */ createIntConfig("repl-timeout", NULL, MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_timeout, 60, INTEGER_CONFIG, NULL, NULL), - createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_slave_period, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_replica_period, 10, INTEGER_CONFIG, NULL, NULL), createIntConfig("list-compress-depth", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 0, INT_MAX, server.list_compress_depth, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("rdb-key-save-delay", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, INT_MIN, INT_MAX, server.rdb_key_save_delay, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("key-load-delay", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, INT_MIN, INT_MAX, server.key_load_delay, 0, INTEGER_CONFIG, NULL, NULL), createIntConfig("active-expire-effort", NULL, MODIFIABLE_CONFIG, 1, 10, 
server.active_expire_effort, 1, INTEGER_CONFIG, NULL, NULL), /* From 1 to 10. */ createIntConfig("hz", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.config_hz, CONFIG_DEFAULT_HZ, INTEGER_CONFIG, NULL, updateHZ), - createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_slaves_to_write, 0, INTEGER_CONFIG, NULL, updateGoodSlaves), - createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_slaves_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodSlaves), + createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_replicas_to_write, 0, INTEGER_CONFIG, NULL, updateGoodReplicas), + createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_replicas_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodReplicas), createIntConfig("watchdog-period", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 0, INT_MAX, server.watchdog_period, 0, INTEGER_CONFIG, NULL, updateWatchdogPeriod), createIntConfig("shutdown-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.shutdown_timeout, 10, INTEGER_CONFIG, NULL, NULL), createIntConfig("repl-diskless-sync-max-replicas", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_max_replicas, 0, INTEGER_CONFIG, NULL, NULL), diff --git a/src/db.c b/src/db.c index 2e6d85cf4e..c879b2ffb5 100644 --- a/src/db.c +++ b/src/db.c @@ -88,8 +88,8 @@ void updateLFU(robj *val) { * * Note: this function also returns NULL if the key is logically expired but * still existing, in case this is a replica and the LOOKUP_WRITE is not set. 
- * Even if the key expiry is master-driven, we can correctly report a key is - * expired on replicas even if the master is lagging expiring our key via DELs + * Even if the key expiry is primary-driven, we can correctly report a key is + * expired on replicas even if the primary is lagging expiring our key via DELs * in the replication link. */ robj *lookupKey(serverDb *db, robj *key, int flags) { dictEntry *de = dbFind(db, key->ptr); @@ -97,14 +97,14 @@ robj *lookupKey(serverDb *db, robj *key, int flags) { if (de) { val = dictGetVal(de); /* Forcing deletion of expired keys on a replica makes the replica - * inconsistent with the master. We forbid it on readonly replicas, but + * inconsistent with the primary. We forbid it on readonly replicas, but * we have to allow it on writable replicas to make write commands * behave consistently. * * It's possible that the WRITE flag is set even during a readonly * command, since the command may trigger events that cause modules to * perform additional writes. */ - int is_ro_replica = server.masterhost && server.repl_slave_ro; + int is_ro_replica = server.primary_host && server.repl_replica_ro; int expire_flags = 0; if (flags & LOOKUP_WRITE && !is_ro_replica) expire_flags |= EXPIRE_FORCE_DELETE_EXPIRED; if (flags & LOOKUP_NOEXPIRE) expire_flags |= EXPIRE_AVOID_DELETE_EXPIRED; @@ -361,10 +361,10 @@ robj *dbRandomKey(serverDb *db) { key = dictGetKey(de); keyobj = createStringObject(key, sdslen(key)); if (dbFindExpires(db, key)) { - if (allvolatile && server.masterhost && --maxtries == 0) { + if (allvolatile && server.primary_host && --maxtries == 0) { /* If the DB is composed only of keys with an expire set, * it could happen that all the keys are already logically - * expired in the slave, so the function cannot stop because + * expired in the replica, so the function cannot stop because * expireIfNeeded() is false, nor it can stop because * dictGetFairRandomKey() returns NULL (there are keys to return).
* To prevent the infinite loop we do some tries, but if there @@ -540,7 +540,7 @@ long long emptyData(int dbnum, int flags, void(callback)(dict *)) { /* Empty the database structure. */ removed = emptyDbStructure(server.db, dbnum, async, callback); - if (dbnum == -1) flushSlaveKeysWithExpireList(); + if (dbnum == -1) flushReplicaKeysWithExpireList(); if (with_functions) { serverAssert(dbnum == -1); @@ -673,7 +673,7 @@ void flushAllDataAndResetRDB(int flags) { if (server.saveparamslen > 0) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - rdbSave(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE); + rdbSave(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE); } #if defined(USE_JEMALLOC) @@ -1610,7 +1610,7 @@ void swapMainDbWithTempDb(serverDb *tempDb) { } trackingInvalidateKeysOnFlush(1); - flushSlaveKeysWithExpireList(); + flushReplicaKeysWithExpireList(); } /* SWAPDB db1 db2 */ @@ -1666,8 +1666,8 @@ void setExpire(client *c, serverDb *db, robj *key, long long when) { dictSetSignedIntegerVal(de, when); } - int writable_slave = server.masterhost && server.repl_slave_ro == 0; - if (c && writable_slave && !(c->flags & CLIENT_MASTER)) rememberSlaveKeyWithExpire(db, key); + int writable_replica = server.primary_host && server.repl_replica_ro == 0; + if (c && writable_replica && !(c->flags & CLIENT_PRIMARY)) rememberReplicaKeyWithExpire(db, key); } /* Return the expire time of the specified key, or -1 if no expire @@ -1694,7 +1694,7 @@ void deleteExpiredKeyAndPropagate(serverDb *db, robj *keyobj) { } /* Propagate an implicit key deletion into replicas and the AOF file. - * When a key was deleted in the master by eviction, expiration or a similar + * When a key was deleted in the primary by eviction, expiration or a similar * mechanism a DEL/UNLINK operation for this key is sent * to all the replicas and the AOF file if enabled. 
 * @@ -1720,7 +1720,7 @@ void propagateDeletion(serverDb *db, robj *key, int lazy) { incrRefCount(argv[0]); incrRefCount(argv[1]); - /* If the master decided to delete a key we must propagate it to replicas no matter what. + /* If the primary decided to delete a key we must propagate it to replicas no matter what. * Even if module executed a command without asking for propagation. */ int prev_replication_allowed = server.replication_allowed; server.replication_allowed = 1; @@ -1755,13 +1755,13 @@ int keyIsExpired(serverDb *db, robj *key) { * * The behavior of the function depends on the replication role of the * instance, because by default replicas do not delete expired keys. They - * wait for DELs from the master for consistency matters. However even + * wait for DELs from the primary for consistency matters. However even * replicas will try to have a coherent return value for the function, * so that read commands executed in the replica side will be able to * behave like if the key is expired even if still present (because the - * master has yet to propagate the DEL). + * primary has yet to propagate the DEL). * - * In masters as a side effect of finding a key which is expired, such + * In primaries as a side effect of finding a key which is expired, such * key will be evicted from the database. Also this may trigger the * propagation of a DEL/UNLINK command in AOF / replication stream. * @@ -1769,7 +1769,7 @@ int keyIsExpired(serverDb *db, robj *key) { * it still returns KEY_EXPIRED if the key is logically expired. To force deletion * of logically expired keys even on replicas, use the EXPIRE_FORCE_DELETE_EXPIRED * flag. Note though that if the current client is executing - * replicated commands from the master, keys are never considered expired. + * replicated commands from the primary, keys are never considered expired.
* * On the other hand, if you just want expiration check, but need to avoid * the actual key deletion and propagation of the deletion, use the @@ -1784,7 +1784,7 @@ keyStatus expireIfNeeded(serverDb *db, robj *key, int flags) { /* If we are running in the context of a replica, instead of * evicting the expired key from the database, we return ASAP: - * the replica key expiration is controlled by the master that will + * the replica key expiration is controlled by the primary that will * send us synthesized DEL operations for expired keys. The * exception is when write operations are performed on writable * replicas. @@ -1793,15 +1793,15 @@ keyStatus expireIfNeeded(serverDb *db, robj *key, int flags) { * that is, KEY_VALID if we think the key should still be valid, * KEY_EXPIRED if we think the key is expired but don't want to delete it at this time. * - * When replicating commands from the master, keys are never considered + * When replicating commands from the primary, keys are never considered * expired. */ - if (server.masterhost != NULL) { - if (server.current_client && (server.current_client->flags & CLIENT_MASTER)) return KEY_VALID; + if (server.primary_host != NULL) { + if (server.current_client && (server.current_client->flags & CLIENT_PRIMARY)) return KEY_VALID; if (!(flags & EXPIRE_FORCE_DELETE_EXPIRED)) return KEY_EXPIRED; } /* In some cases we're explicitly instructed to return an indication of a - * missing key without actually deleting it, even on masters. */ + * missing key without actually deleting it, even on primaries. */ if (flags & EXPIRE_AVOID_DELETE_EXPIRED) return KEY_EXPIRED; /* If 'expire' action is paused, for whatever reason, then don't expire any key. @@ -1894,7 +1894,7 @@ unsigned long long dbScan(serverDb *db, unsigned long long cursor, dictScanFunct * the result, and can be called repeatedly to enlarge the result array. 
*/ keyReference *getKeysPrepareResult(getKeysResult *result, int numkeys) { - /* GETKEYS_RESULT_INIT initializes keys to NULL, point it to the pre-allocated stack + /* initGetKeysResult initializes keys to NULL, point it to the pre-allocated stack * buffer here. */ if (!result->keys) { serverAssert(!result->numkeys); diff --git a/src/debug.c b/src/debug.c index 51e9c6e9f6..bc7f1c9e7e 100644 --- a/src/debug.c +++ b/src/debug.c @@ -429,6 +429,9 @@ void debugCommand(client *c) { " Show low level info about `key` and associated value.", "DROP-CLUSTER-PACKET-FILTER ", " Drop all packets that match the filtered type. Set to -1 allow all packets.", + "CLOSE-CLUSTER-LINK-ON-PACKET-DROP <0|1>", + " This is effective only when DROP-CLUSTER-PACKET-FILTER is set to a valid packet type.", + " When set to 1, the cluster link is closed after dropping a packet based on the filter.", "OOM", " Crash the server simulating an out-of-memory error.", "PANIC", @@ -552,7 +555,7 @@ void debugCommand(client *c) { if (save) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - if (rdbSave(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) != C_OK) { + if (rdbSave(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) != C_OK) { addReplyErrorObject(c, shared.err); return; } @@ -591,7 +594,10 @@ void debugCommand(client *c) { } else if (!strcasecmp(c->argv[1]->ptr, "drop-cluster-packet-filter") && c->argc == 3) { long packet_type; if (getLongFromObjectOrReply(c, c->argv[2], &packet_type, NULL) != C_OK) return; - server.cluster_drop_packet_filter = packet_type; + server.debug_cluster_drop_packet_filter = packet_type; + addReply(c, shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr, "close-cluster-link-on-packet-drop") && c->argc == 3) { + server.debug_cluster_close_link_on_packet_drop = atoi(c->argv[2]->ptr) != 0; addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "object") && c->argc == 3) { dictEntry *de; @@ -845,7 +851,7 @@ void debugCommand(client 
*c) { server.aof_flush_sleep = atoi(c->argv[2]->ptr); addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "replicate") && c->argc >= 3) { - replicationFeedSlaves(-1, c->argv + 2, c->argc - 2); + replicationFeedReplicas(-1, c->argv + 2, c->argc - 2); addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "error") && c->argc == 3) { sds errstr = sdsnewlen("-", 1); @@ -925,7 +931,7 @@ void debugCommand(client *c) { addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "stringmatch-test") && c->argc == 2) { stringmatchlen_fuzz_test(); - addReplyStatus(c, "Apparently Redis did not crash: test passed"); + addReplyStatus(c, "Apparently the server did not crash: test passed"); } else if (!strcasecmp(c->argv[1]->ptr, "set-disable-deny-scripts") && c->argc == 3) { server.script_disable_deny_script = atoi(c->argv[2]->ptr); addReply(c, shared.ok); @@ -966,7 +972,7 @@ void debugCommand(client *c) { return; #endif } else if (!strcasecmp(c->argv[1]->ptr, "pause-cron") && c->argc == 3) { - server.pause_cron = atoi(c->argv[2]->ptr); + server.debug_pause_cron = atoi(c->argv[2]->ptr) != 0; addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "replybuffer") && c->argc == 4) { if (!strcasecmp(c->argv[2]->ptr, "peak-reset-time")) { diff --git a/src/dict.c b/src/dict.c index 119c60ab57..bc92d49564 100644 --- a/src/dict.c +++ b/src/dict.c @@ -48,6 +48,8 @@ #include "serverassert.h" #include "monotonic.h" +#define UNUSED(V) ((void)V) + /* Using dictSetResizeEnabled() we make possible to disable * resizing and rehashing of the hash table as needed. This is very important * for us, as we use copy-on-write and don't want to move too much memory @@ -800,8 +802,9 @@ void dictSetKey(dict *d, dictEntry *de, void *key) { } void dictSetVal(dict *d, dictEntry *de, void *val) { + UNUSED(d); assert(entryHasValue(de)); - de->v.val = d->type->valDup ? 
 d->type->valDup(d, val) : val; + de->v.val = val; } void dictSetSignedIntegerVal(dictEntry *de, int64_t val) { @@ -940,6 +943,8 @@ unsigned long long dictFingerprint(dict *d) { return hash; } +/* Initialize a normal iterator. This function should be called when initializing + * an iterator on the stack. */ void dictInitIterator(dictIterator *iter, dict *d) { iter->d = d; iter->table = 0; @@ -949,6 +954,8 @@ void dictInitIterator(dictIterator *iter, dict *d) { iter->nextEntry = NULL; } +/* Initialize a safe iterator, which is allowed to modify the dictionary while iterating. + * You must call dictResetIterator when you are done with a safe iterator. */ void dictInitSafeIterator(dictIterator *iter, dict *d) { dictInitIterator(iter, d); iter->safe = 1; @@ -956,9 +963,10 @@ void dictInitSafeIterator(dictIterator *iter, dict *d) { void dictResetIterator(dictIterator *iter) { if (!(iter->index == -1 && iter->table == 0)) { - if (iter->safe) + if (iter->safe) { dictResumeRehashing(iter->d); - else + assert(iter->d->pauserehash >= 0); + } else assert(iter->fingerprint == dictFingerprint(iter->d)); } } @@ -1745,7 +1753,7 @@ char *stringFromLongLong(long long value) { return s; } -dictType BenchmarkDictType = {hashCallback, NULL, NULL, compareCallback, freeCallback, NULL, NULL}; +dictType BenchmarkDictType = {hashCallback, NULL, compareCallback, freeCallback, NULL, NULL}; #define start_benchmark() start = timeInMilliseconds() #define end_benchmark(msg) \ diff --git a/src/dict.h b/src/dict.h index 7ba22edf1e..723e5a54c2 100644 --- a/src/dict.h +++ b/src/dict.h @@ -54,7 +54,6 @@ typedef struct dictType { /* Callbacks */ uint64_t (*hashFunction)(const void *key); void *(*keyDup)(dict *d, const void *key); - void *(*valDup)(dict *d, const void *obj); int (*keyCompare)(dict *d, const void *key1, const void *key2); void (*keyDestructor)(dict *d, void *key); void (*valDestructor)(dict *d, void *obj); diff --git a/src/eval.c b/src/eval.c index d9c2c183d6..f4d09a5aa6 100644 --- 
a/src/eval.c +++ b/src/eval.c @@ -71,7 +71,6 @@ static uint64_t dictStrCaseHash(const void *key) { dictType shaScriptObjectDictType = { dictStrCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictLuaScriptDestructor, /* val destructor */ @@ -100,7 +99,7 @@ struct ldbState { int bp[LDB_BREAKPOINTS_MAX]; /* An array of breakpoints line numbers. */ int bpcount; /* Number of valid entries inside bp. */ int step; /* Stop at next line regardless of breakpoints. */ - int luabp; /* Stop at next line because redis.breakpoint() was called. */ + int luabp; /* Stop at next line because server.breakpoint() was called. */ sds *src; /* Lua script source code split by line. */ int lines; /* Number of lines in 'src'. */ int currentline; /* Current line number. */ @@ -115,7 +114,7 @@ struct ldbState { /* Perform the SHA1 of the input string. We use this both for hashing script * bodies in order to obtain the Lua function name, and in the implementation - * of redis.sha1(). + * of server.sha1(). * * 'digest' should point to a 41 bytes buffer: 40 for SHA1 converted into an * hexadecimal number, plus 1 byte for null term. */ @@ -136,12 +135,12 @@ void sha1hex(char *digest, char *script, size_t len) { digest[40] = '\0'; } -/* redis.breakpoint() +/* server.breakpoint() * * Allows to stop execution during a debugging session from within * the Lua code implementation, like if a breakpoint was set in the code * immediately after the function. */ -int luaRedisBreakpointCommand(lua_State *lua) { +int luaServerBreakpointCommand(lua_State *lua) { if (ldb.active) { ldb.luabp = 1; lua_pushboolean(lua, 1); @@ -151,12 +150,12 @@ int luaRedisBreakpointCommand(lua_State *lua) { return 1; } -/* redis.debug() +/* server.debug() * * Log a string message into the output console. * Can take multiple arguments that will be separated by commas. * Nothing is returned to the caller. 
*/ -int luaRedisDebugCommand(lua_State *lua) { +int luaServerDebugCommand(lua_State *lua) { if (!ldb.active) return 0; int argc = lua_gettop(lua); sds log = sdscatprintf(sdsempty(), " line %d: ", ldb.currentline); @@ -168,14 +167,14 @@ int luaRedisDebugCommand(lua_State *lua) { return 0; } -/* redis.replicate_commands() +/* server.replicate_commands() * * DEPRECATED: Now do nothing and always return true. * Turn on single commands replication if the script never called * a write command so far, and returns true. Otherwise if the script * already started to write, returns false and stick to whole scripts * replication, which is our default. */ -int luaRedisReplicateCommandsCommand(lua_State *lua) { +int luaServerReplicateCommandsCommand(lua_State *lua) { lua_pushboolean(lua, 1); return 1; } @@ -206,27 +205,27 @@ void scriptingInit(int setup) { lctx.lua_scripts_lru_list = listCreate(); lctx.lua_scripts_mem = 0; - luaRegisterRedisAPI(lua); + luaRegisterServerAPI(lua); /* register debug commands */ - lua_getglobal(lua, "redis"); + lua_getglobal(lua, "server"); - /* redis.breakpoint */ + /* server.breakpoint */ lua_pushstring(lua, "breakpoint"); - lua_pushcfunction(lua, luaRedisBreakpointCommand); + lua_pushcfunction(lua, luaServerBreakpointCommand); lua_settable(lua, -3); - /* redis.debug */ + /* server.debug */ lua_pushstring(lua, "debug"); - lua_pushcfunction(lua, luaRedisDebugCommand); + lua_pushcfunction(lua, luaServerDebugCommand); lua_settable(lua, -3); - /* redis.replicate_commands */ + /* server.replicate_commands */ lua_pushstring(lua, "replicate_commands"); - lua_pushcfunction(lua, luaRedisReplicateCommandsCommand); + lua_pushcfunction(lua, luaServerReplicateCommandsCommand); lua_settable(lua, -3); - lua_setglobal(lua, "redis"); + lua_setglobal(lua, "server"); /* Add a helper function we use for pcall error reporting. 
* Note that when the error is in the C function we want to report the @@ -448,6 +447,13 @@ sds luaCreateFunction(client *c, robj *body, int evalsha) { sha1hex(funcname + 2, body->ptr, sdslen(body->ptr)); if ((de = dictFind(lctx.lua_scripts, funcname + 2)) != NULL) { + /* If the script was previously added via EVAL, we promote it to + * SCRIPT LOAD, prevent it from being evicted later. */ + luaScript *l = dictGetVal(de); + if (evalsha && l->node) { + listDelNode(lctx.lua_scripts_lru_list, l->node); + l->node = NULL; + } return dictGetKey(de); } @@ -676,6 +682,8 @@ void scriptCommand(client *c) { " Kill the currently executing Lua script.", "LOAD