diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b64798a6f9..508565d296 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,7 +37,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make # build with TLS module just for compilation coverage - run: make SANITIZER=address SERVER_CFLAGS='-Werror -DDEBUG_ASSERTIONS' BUILD_TLS=module + run: make SANITIZER=address SERVER_CFLAGS='-Werror' BUILD_TLS=module - name: testprep run: sudo apt-get install tcl8.6 tclx -y - name: test @@ -78,16 +78,14 @@ jobs: - name: make run: make SERVER_CFLAGS='-Werror' MALLOC=libc - build-centos7-jemalloc: + build-almalinux8-jemalloc: runs-on: ubuntu-latest - container: centos:7 + container: almalinux:8 steps: - # on centos7, actions/checkout@v4 does not work, so we use v3 - # ref. https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make run: | - yum -y install gcc make - make SERVER_CFLAGS='-Werror' + dnf -y install epel-release gcc make procps-ng which + make -j SERVER_CFLAGS='-Werror' diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml new file mode 100644 index 0000000000..cb76216d4e --- /dev/null +++ b/.github/workflows/clang-format.yml @@ -0,0 +1,48 @@ +name: Clang Format Check + +on: + pull_request: + paths: + - 'src/**' + +jobs: + clang-format-check: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Set up Clang + run: | + sudo apt-get update -y + sudo apt-get upgrade -y + sudo apt-get install software-properties-common -y + wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | gpg --dearmor | sudo tee /usr/share/keyrings/llvm-toolchain.gpg > /dev/null + echo "deb [signed-by=/usr/share/keyrings/llvm-toolchain.gpg] http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-18 main" | sudo tee /etc/apt/sources.list.d/llvm.list + sudo apt-get update -y + sudo apt-get install clang-format-18 -y + - name: Run clang-format + id: clang-format + run: | + # Run clang-format and capture the diff + cd src + shopt -s globstar + clang-format-18 -i **/*.c **/*.h + # Capture the diff output + DIFF=$(git diff) + if [ ! -z "$DIFF" ]; then + # Encode the diff in Base64 to ensure it's handled as a single line + ENCODED_DIFF=$(echo "$DIFF" | base64 -w 0) + echo "diff=$ENCODED_DIFF" >> $GITHUB_OUTPUT + fi + shell: bash + + - name: Check for formatting changes + if: ${{ steps.clang-format.outputs.diff }} + run: | + echo "ERROR: Code is not formatted correctly. 
Here is the diff:" + # Decode the Base64 diff to display it + echo "${{ steps.clang-format.outputs.diff }}" | base64 --decode + exit 1 + shell: bash diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 356178f097..ff7a9ad67b 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -11,7 +11,7 @@ on: inputs: skipjobs: description: 'jobs to skip (delete the ones you wanna keep, do not leave empty)' - default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,centos,malloc,specific,fortify,reply-schema' + default: 'valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,rpm-distros,malloc,specific,fortify,reply-schema' skiptests: description: 'tests to skip (delete the ones you wanna keep, do not leave empty)' default: 'valkey,modules,sentinel,cluster,unittest' @@ -598,7 +598,7 @@ jobs: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} - name: make - run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror -DDEBUG_ASSERTIONS' + run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror' - name: testprep run: | sudo apt-get update @@ -672,13 +672,34 @@ jobs: if: true && !contains(github.event.inputs.skiptests, 'unittest') run: ./src/valkey-unit-tests --accurate - test-centos7-jemalloc: - runs-on: ubuntu-latest + test-rpm-distros-jemalloc: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'centos') - container: centos:7 + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + !contains(github.event.inputs.skipjobs, 'rpm-distros') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-jemalloc + container: almalinux:8 + install_epel: true + - name: test-almalinux9-jemalloc + container: almalinux:8 + install_epel: true + - name: test-centosstream9-jemalloc + container: quay.io/centos/centos:stream9 + install_epel: true + - name: test-fedoralatest-jemalloc + container: fedora:latest + - name: test-fedorarawhide-jemalloc + container: fedora:rawhide + + name: ${{ matrix.name }} + runs-on: ubuntu-latest + + container: ${{ matrix.container }} timeout-minutes: 14400 + steps: - name: prep if: github.event_name == 'workflow_dispatch' @@ -689,18 +710,19 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - # On centos7 actions/checkout@v4 does not work, so we use v3 - # ref. 
https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release - name: make run: | - yum -y install gcc make - make SERVER_CFLAGS='-Werror' + dnf -y install gcc make procps-ng which /usr/bin/kill + make -j SERVER_CFLAGS='-Werror' - name: testprep - run: yum -y install which tcl tclx + run: dnf -y install tcl tcltls - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} @@ -714,13 +736,34 @@ jobs: if: true && !contains(github.event.inputs.skiptests, 'cluster') run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - test-centos7-tls-module: - runs-on: ubuntu-latest + test-rpm-distros-tls-module: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'tls') - container: centos:7 + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + !contains(github.event.inputs.skipjobs, 'tls') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-tls-module + container: almalinux:8 + install_epel: true + - name: test-almalinux9-tls-module + container: almalinux:8 + install_epel: true + - name: test-centosstream9-tls-module + container: quay.io/centos/centos:stream9 + install_epel: true + - name: test-fedoralatest-tls-module + container: fedora:latest + - name: test-fedorarawhide-tls-module + container: fedora:rawhide + + name: ${{ matrix.name }} + runs-on: ubuntu-latest + + container: ${{ matrix.container }} timeout-minutes: 14400 + steps: - name: prep if: github.event_name == 'workflow_dispatch' @@ -731,20 +774,20 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - # On centos7 actions/checkout@v4 does not work, so we use v3 - # ref. 
https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release - name: make run: | - yum -y install centos-release-scl epel-release - yum -y install devtoolset-7 openssl-devel openssl - scl enable devtoolset-7 "make BUILD_TLS=module SERVER_CFLAGS='-Werror'" + dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill + make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' - name: testprep run: | - yum -y install tcl tcltls tclx + dnf -y install tcl tcltls ./utils/gen-test-certs.sh - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') @@ -763,13 +806,34 @@ jobs: run: | ./runtest-cluster --tls-module ${{github.event.inputs.cluster_test_args}} - test-centos7-tls-module-no-tls: - runs-on: ubuntu-latest + test-rpm-distros-tls-module-no-tls: if: | - (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && - !contains(github.event.inputs.skipjobs, 'tls') - container: centos:7 + (github.event_name == 'workflow_dispatch' || (github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey')) && + !contains(github.event.inputs.skipjobs, 'tls') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-tls-module-no-tls + container: almalinux:8 + install_epel: true + - name: test-almalinux9-tls-module-no-tls + container: almalinux:8 + install_epel: true + - name: test-centosstream9-tls-module-no-tls + container: quay.io/centos/centos:stream9 + install_epel: true + - name: test-fedoralatest-tls-module-no-tls + container: fedora:latest + - name: test-fedorarawhide-tls-module-no-tls + container: fedora:rawhide + + name: ${{ matrix.name }} + runs-on: ubuntu-latest + + container: ${{ matrix.container }} timeout-minutes: 14400 + steps: - name: prep if: github.event_name == 'workflow_dispatch' @@ -780,20 +844,20 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - # On centos7 actions/checkout@v4 does not work, so we use v3 - # ref. 
https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release - name: make run: | - yum -y install centos-release-scl epel-release - yum -y install devtoolset-7 openssl-devel openssl - scl enable devtoolset-7 "make BUILD_TLS=module SERVER_CFLAGS='-Werror'" + dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill + make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' - name: testprep run: | - yum -y install tcl tcltls tclx + dnf -y install tcl tcltls ./utils/gen-test-certs.sh - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') @@ -1074,7 +1138,7 @@ jobs: notify-about-job-results: runs-on: ubuntu-latest if: always() && github.event_name != 'workflow_dispatch' && github.repository == 'valkey-io/valkey' - needs: [test-ubuntu-jemalloc, test-ubuntu-jemalloc-fortify, test-ubuntu-libc-malloc, test-ubuntu-no-malloc-usable-size, test-ubuntu-32bit, test-ubuntu-tls, test-ubuntu-tls-no-tls, test-ubuntu-io-threads, test-ubuntu-reclaim-cache, test-valgrind-test, test-valgrind-misc, test-valgrind-no-malloc-usable-size-test, test-valgrind-no-malloc-usable-size-misc, test-sanitizer-address, test-sanitizer-undefined, test-centos7-jemalloc, test-centos7-tls-module, test-centos7-tls-module-no-tls, test-macos-latest, test-macos-latest-sentinel, test-macos-latest-cluster, build-macos, test-freebsd, test-alpine-jemalloc, test-alpine-libc-malloc, reply-schemas-validator] + needs: [test-ubuntu-jemalloc, test-ubuntu-jemalloc-fortify, test-ubuntu-libc-malloc, test-ubuntu-no-malloc-usable-size, test-ubuntu-32bit, test-ubuntu-tls, test-ubuntu-tls-no-tls, test-ubuntu-io-threads, test-ubuntu-reclaim-cache, test-valgrind-test, test-valgrind-misc, test-valgrind-no-malloc-usable-size-test, test-valgrind-no-malloc-usable-size-misc, test-sanitizer-address, test-sanitizer-undefined, test-rpm-distros-jemalloc, test-rpm-distros-tls-module, test-rpm-distros-tls-module-no-tls, test-macos-latest, test-macos-latest-sentinel, test-macos-latest-cluster, build-macos, test-freebsd, test-alpine-jemalloc, test-alpine-libc-malloc, reply-schemas-validator] steps: - name: Collect job status run: | diff --git a/.gitignore b/.gitignore index 8ed98aa326..a1b72a462e 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,7 @@ Makefile.dep compile_commands.json redis.code-workspace .cache +.cscope* +.swp +nodes.conf +tests/cluster/tmp/* diff --git a/COPYING b/COPYING index 10928babb3..2058f57e56 100644 --- a/COPYING +++ b/COPYING @@ -1,5 +1,7 @@ # License 1 +BSD 3-Clause License + Copyright (c) 2024-present, Valkey contributors All rights reserved. @@ -13,6 +15,8 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # License 2 +BSD 3-Clause License + Copyright (c) 2006-2020, Salvatore Sanfilippo All rights reserved. 
diff --git a/src/Makefile b/src/Makefile index 6defebed8d..302ad06b84 100644 --- a/src/Makefile +++ b/src/Makefile @@ -150,6 +150,11 @@ DEBUG=-g -ggdb # Linux ARM32 needs -latomic at linking time ifneq (,$(findstring armv,$(uname_M))) FINAL_LIBS+=-latomic +else +# Linux POWER needs -latomic at linking time +ifneq (,$(findstring ppc,$(uname_M))) + FINAL_LIBS+=-latomic +endif endif ifeq ($(uname_S),SunOS) diff --git a/src/acl.c b/src/acl.c index 46ed85baf2..533782acad 100644 --- a/src/acl.c +++ b/src/acl.c @@ -506,7 +506,7 @@ void ACLFreeUserAndKillClients(user *u) { * more defensive to set the default user and put * it in non authenticated mode. */ c->user = DefaultUser; - c->authenticated = 0; + c->flags &= ~CLIENT_AUTHENTICATED; /* We will write replies to this client later, so we can't * close it directly even if async. */ if (c == server.current_client) { @@ -1494,7 +1494,7 @@ void addAuthErrReply(client *c, robj *err) { * The return value is AUTH_OK on success (valid username / password pair) & AUTH_ERR otherwise. */ int checkPasswordBasedAuth(client *c, robj *username, robj *password) { if (ACLCheckUserCredentials(username, password) == C_OK) { - c->authenticated = 1; + c->flags |= CLIENT_AUTHENTICATED; c->user = ACLGetUserByName(username->ptr, sdslen(username->ptr)); moduleNotifyUserChanged(c); return AUTH_OK; @@ -1587,12 +1587,10 @@ static int ACLSelectorCheckKey(aclSelector *selector, const char *key, int keyle listRewind(selector->patterns, &li); int key_flags = 0; - /* clang-format off */ if (keyspec_flags & CMD_KEY_ACCESS) key_flags |= ACL_READ_PERMISSION; if (keyspec_flags & CMD_KEY_INSERT) key_flags |= ACL_WRITE_PERMISSION; if (keyspec_flags & CMD_KEY_DELETE) key_flags |= ACL_WRITE_PERMISSION; if (keyspec_flags & CMD_KEY_UPDATE) key_flags |= ACL_WRITE_PERMISSION; - /* clang-format on */ /* Test this key against every pattern. */ while ((ln = listNext(&li))) { @@ -1618,12 +1616,10 @@ static int ACLSelectorHasUnrestrictedKeyAccess(aclSelector *selector, int flags) listRewind(selector->patterns, &li); int access_flags = 0; - /* clang-format off */ if (flags & CMD_KEY_ACCESS) access_flags |= ACL_READ_PERMISSION; if (flags & CMD_KEY_INSERT) access_flags |= ACL_WRITE_PERMISSION; if (flags & CMD_KEY_DELETE) access_flags |= ACL_WRITE_PERMISSION; if (flags & CMD_KEY_UPDATE) access_flags |= ACL_WRITE_PERMISSION; - /* clang-format on */ /* Test this key against every pattern. */ while ((ln = listNext(&li))) { @@ -2428,20 +2424,21 @@ sds ACLLoadFromFile(const char *filename) { client *c = listNodeValue(ln); user *original = c->user; list *channels = NULL; - user *new = ACLGetUserByName(c->user->name, sdslen(c->user->name)); - if (new && user_channels) { - if (!raxFind(user_channels, (unsigned char *)(new->name), sdslen(new->name), (void **)&channels)) { - channels = getUpcomingChannelList(new, original); - raxInsert(user_channels, (unsigned char *)(new->name), sdslen(new->name), channels, NULL); + user *new_user = ACLGetUserByName(c->user->name, sdslen(c->user->name)); + if (new_user && user_channels) { + if (!raxFind(user_channels, (unsigned char *)(new_user->name), sdslen(new_user->name), + (void **)&channels)) { + channels = getUpcomingChannelList(new_user, original); + raxInsert(user_channels, (unsigned char *)(new_user->name), sdslen(new_user->name), channels, NULL); } } /* When the new channel list is NULL, it means the new user's channel list is a superset of the old user's * list. 
*/ - if (!new || (channels && ACLShouldKillPubsubClient(c, channels))) { + if (!new_user || (channels && ACLShouldKillPubsubClient(c, channels))) { freeClient(c); continue; } - c->user = new; + c->user = new_user; } if (user_channels) raxFreeWithCallback(user_channels, (void (*)(void *))listRelease); @@ -2668,15 +2665,13 @@ void addACLLogEntry(client *c, int reason, int context, int argpos, sds username if (object) { le->object = object; } else { - /* clang-format off */ - switch(reason) { + switch (reason) { case ACL_DENIED_CMD: le->object = sdsdup(c->cmd->fullname); break; case ACL_DENIED_KEY: le->object = sdsdup(c->argv[argpos]->ptr); break; case ACL_DENIED_CHANNEL: le->object = sdsdup(c->argv[argpos]->ptr); break; case ACL_DENIED_AUTH: le->object = sdsdup(c->argv[0]->ptr); break; default: le->object = sdsempty(); } - /* clang-format on */ } /* if we have a real client from the network, use it (could be missing on module timers) */ @@ -3057,28 +3052,24 @@ void aclCommand(client *c) { addReplyBulkCString(c, "reason"); char *reasonstr; - /* clang-format off */ - switch(le->reason) { - case ACL_DENIED_CMD: reasonstr="command"; break; - case ACL_DENIED_KEY: reasonstr="key"; break; - case ACL_DENIED_CHANNEL: reasonstr="channel"; break; - case ACL_DENIED_AUTH: reasonstr="auth"; break; - default: reasonstr="unknown"; + switch (le->reason) { + case ACL_DENIED_CMD: reasonstr = "command"; break; + case ACL_DENIED_KEY: reasonstr = "key"; break; + case ACL_DENIED_CHANNEL: reasonstr = "channel"; break; + case ACL_DENIED_AUTH: reasonstr = "auth"; break; + default: reasonstr = "unknown"; } - /* clang-format on */ addReplyBulkCString(c, reasonstr); addReplyBulkCString(c, "context"); char *ctxstr; - /* clang-format off */ - switch(le->context) { - case ACL_LOG_CTX_TOPLEVEL: ctxstr="toplevel"; break; - case ACL_LOG_CTX_MULTI: ctxstr="multi"; break; - case ACL_LOG_CTX_LUA: ctxstr="lua"; break; - case ACL_LOG_CTX_MODULE: ctxstr="module"; break; - default: ctxstr="unknown"; + switch (le->context) { + case ACL_LOG_CTX_TOPLEVEL: ctxstr = "toplevel"; break; + case ACL_LOG_CTX_MULTI: ctxstr = "multi"; break; + case ACL_LOG_CTX_LUA: ctxstr = "lua"; break; + case ACL_LOG_CTX_MODULE: ctxstr = "module"; break; + default: ctxstr = "unknown"; } - /* clang-format on */ addReplyBulkCString(c, ctxstr); addReplyBulkCString(c, "object"); diff --git a/src/aof.c b/src/aof.c index a2ed139f8b..ac9ffd5fcb 100644 --- a/src/aof.c +++ b/src/aof.c @@ -904,12 +904,12 @@ int aofFsyncInProgress(void) { /* Starts a background task that performs fsync() against the specified * file descriptor (the one of the AOF file) in another thread. */ void aof_background_fsync(int fd) { - bioCreateFsyncJob(fd, server.master_repl_offset, 1); + bioCreateFsyncJob(fd, server.primary_repl_offset, 1); } /* Close the fd on the basis of aof_background_fsync. 
*/ void aof_background_fsync_and_close(int fd) { - bioCreateCloseAofJob(fd, server.master_repl_offset, 1); + bioCreateCloseAofJob(fd, server.primary_repl_offset, 1); } /* Kills an AOFRW child process if exists */ @@ -946,7 +946,7 @@ void stopAppendOnly(void) { server.aof_last_incr_size = 0; server.aof_last_incr_fsync_offset = 0; server.fsynced_reploff = -1; - atomicSet(server.fsynced_reploff_pending, 0); + atomic_store_explicit(&server.fsynced_reploff_pending, 0, memory_order_relaxed); killAppendOnlyChild(); sdsfree(server.aof_buf); server.aof_buf = sdsempty(); @@ -985,11 +985,10 @@ int startAppendOnly(void) { } server.aof_last_fsync = server.mstime; /* If AOF fsync error in bio job, we just ignore it and log the event. */ - int aof_bio_fsync_status; - atomicGet(server.aof_bio_fsync_status, aof_bio_fsync_status); + int aof_bio_fsync_status = atomic_load_explicit(&server.aof_bio_fsync_status, memory_order_relaxed); if (aof_bio_fsync_status == C_ERR) { serverLog(LL_WARNING, "AOF reopen, just ignore the AOF fsync error in bio job"); - atomicSet(server.aof_bio_fsync_status, C_OK); + atomic_store_explicit(&server.aof_bio_fsync_status, C_OK, memory_order_relaxed); } /* If AOF was in error state, we just ignore it and log the event. */ @@ -1070,11 +1069,12 @@ void flushAppendOnlyFile(int force) { } else { /* All data is fsync'd already: Update fsynced_reploff_pending just in case. * This is needed to avoid a WAITAOF hang in case a module used RM_Call with the NO_AOF flag, - * in which case master_repl_offset will increase but fsynced_reploff_pending won't be updated + * in which case primary_repl_offset will increase but fsynced_reploff_pending won't be updated * (because there's no reason, from the AOF POV, to call fsync) and then WAITAOF may wait on * the higher offset (which contains data that was only propagated to replicas, and not to AOF) */ if (!sync_in_progress && server.aof_fsync != AOF_FSYNC_NO) - atomicSet(server.fsynced_reploff_pending, server.master_repl_offset); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, + memory_order_relaxed); return; } } @@ -1244,7 +1244,7 @@ void flushAppendOnlyFile(int force) { latencyAddSampleIfNeeded("aof-fsync-always", latency); server.aof_last_incr_fsync_offset = server.aof_last_incr_size; server.aof_last_fsync = server.mstime; - atomicSet(server.fsynced_reploff_pending, server.master_repl_offset); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, memory_order_relaxed); } else if (server.aof_fsync == AOF_FSYNC_EVERYSEC && server.mstime - server.aof_last_fsync >= 1000) { if (!sync_in_progress) { aof_background_fsync(server.aof_fd); @@ -1356,7 +1356,7 @@ struct client *createAOFClient(void) { c->id = CLIENT_ID_AOF; /* So modules can identify it's the AOF client. */ /* - * The AOF client should never be blocked (unlike master + * The AOF client should never be blocked (unlike primary * replication connection). * This is because blocking the AOF client might cause * deadlock (because potentially no one will unblock it). @@ -1366,9 +1366,9 @@ struct client *createAOFClient(void) { */ c->flags = CLIENT_DENY_BLOCKING; - /* We set the fake client as a slave waiting for the synchronization + /* We set the fake client as a replica waiting for the synchronization * so that the server will not try to send replies to this client. 
*/ - c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; + c->repl_state = REPLICA_STATE_WAIT_BGSAVE_START; return c; } @@ -1994,21 +1994,19 @@ int rioWriteStreamPendingEntry(rio *r, RETRYCOUNT JUSTID FORCE. */ streamID id; streamDecodeID(rawid, &id); - /* clang-format off */ - if (rioWriteBulkCount(r,'*',12) == 0) return 0; - if (rioWriteBulkString(r,"XCLAIM",6) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkString(r,groupname,groupname_len) == 0) return 0; - if (rioWriteBulkString(r,consumer->name,sdslen(consumer->name)) == 0) return 0; - if (rioWriteBulkString(r,"0",1) == 0) return 0; - if (rioWriteBulkStreamID(r,&id) == 0) return 0; - if (rioWriteBulkString(r,"TIME",4) == 0) return 0; - if (rioWriteBulkLongLong(r,nack->delivery_time) == 0) return 0; - if (rioWriteBulkString(r,"RETRYCOUNT",10) == 0) return 0; - if (rioWriteBulkLongLong(r,nack->delivery_count) == 0) return 0; - if (rioWriteBulkString(r,"JUSTID",6) == 0) return 0; - if (rioWriteBulkString(r,"FORCE",5) == 0) return 0; - /* clang-format on */ + if (rioWriteBulkCount(r, '*', 12) == 0) return 0; + if (rioWriteBulkString(r, "XCLAIM", 6) == 0) return 0; + if (rioWriteBulkObject(r, key) == 0) return 0; + if (rioWriteBulkString(r, groupname, groupname_len) == 0) return 0; + if (rioWriteBulkString(r, consumer->name, sdslen(consumer->name)) == 0) return 0; + if (rioWriteBulkString(r, "0", 1) == 0) return 0; + if (rioWriteBulkStreamID(r, &id) == 0) return 0; + if (rioWriteBulkString(r, "TIME", 4) == 0) return 0; + if (rioWriteBulkLongLong(r, nack->delivery_time) == 0) return 0; + if (rioWriteBulkString(r, "RETRYCOUNT", 10) == 0) return 0; + if (rioWriteBulkLongLong(r, nack->delivery_count) == 0) return 0; + if (rioWriteBulkString(r, "JUSTID", 6) == 0) return 0; + if (rioWriteBulkString(r, "FORCE", 5) == 0) return 0; return 1; } @@ -2021,14 +2019,12 @@ int rioWriteStreamEmptyConsumer(rio *r, size_t groupname_len, streamConsumer *consumer) { /* XGROUP CREATECONSUMER */ - /* clang-format off */ - if (rioWriteBulkCount(r,'*',5) == 0) return 0; - if (rioWriteBulkString(r,"XGROUP",6) == 0) return 0; - if (rioWriteBulkString(r,"CREATECONSUMER",14) == 0) return 0; - if (rioWriteBulkObject(r,key) == 0) return 0; - if (rioWriteBulkString(r,groupname,groupname_len) == 0) return 0; - if (rioWriteBulkString(r,consumer->name,sdslen(consumer->name)) == 0) return 0; - /* clang-format on */ + if (rioWriteBulkCount(r, '*', 5) == 0) return 0; + if (rioWriteBulkString(r, "XGROUP", 6) == 0) return 0; + if (rioWriteBulkString(r, "CREATECONSUMER", 14) == 0) return 0; + if (rioWriteBulkObject(r, key) == 0) return 0; + if (rioWriteBulkString(r, groupname, groupname_len) == 0) return 0; + if (rioWriteBulkString(r, consumer->name, sdslen(consumer->name)) == 0) return 0; return 1; } @@ -2321,7 +2317,7 @@ int rewriteAppendOnlyFile(char *filename) { if (server.aof_use_rdb_preamble) { int error; - if (rdbSaveRio(SLAVE_REQ_NONE, &aof, &error, RDBFLAGS_AOF_PREAMBLE, NULL) == C_ERR) { + if (rdbSaveRio(REPLICA_REQ_NONE, &aof, &error, RDBFLAGS_AOF_PREAMBLE, NULL) == C_ERR) { errno = error; goto werr; } @@ -2404,12 +2400,12 @@ int rewriteAppendOnlyFileBackground(void) { * between updates to `fsynced_reploff_pending` of the worker thread, belonging * to the previous AOF, and the new one. This concern is specific for a full * sync scenario where we don't wanna risk the ACKed replication offset - * jumping backwards or forward when switching to a different master. */ + * jumping backwards or forward when switching to a different primary. 
*/ bioDrainWorker(BIO_AOF_FSYNC); /* Set the initial repl_offset, which will be applied to fsynced_reploff * when AOFRW finishes (after possibly being updated by a bio thread) */ - atomicSet(server.fsynced_reploff_pending, server.master_repl_offset); + atomic_store_explicit(&server.fsynced_reploff_pending, server.primary_repl_offset, memory_order_relaxed); server.fsynced_reploff = 0; } @@ -2647,8 +2643,8 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { /* Update the fsynced replication offset that just now become valid. * This could either be the one we took in startAppendOnly, or a * newer one set by the bio thread. */ - long long fsynced_reploff_pending; - atomicGet(server.fsynced_reploff_pending, fsynced_reploff_pending); + long long fsynced_reploff_pending = + atomic_load_explicit(&server.fsynced_reploff_pending, memory_order_relaxed); server.fsynced_reploff = fsynced_reploff_pending; } diff --git a/src/bio.c b/src/bio.c index 4d1d268e62..11692e77ef 100644 --- a/src/bio.c +++ b/src/bio.c @@ -62,6 +62,7 @@ #include "server.h" #include "bio.h" +#include static char *bio_worker_title[] = { "bio_close_file", @@ -257,16 +258,16 @@ void *bioProcessBackgroundJobs(void *arg) { * socket, pipe, or file. We just ignore these errno because * aof fsync did not really fail. */ if (valkey_fsync(job->fd_args.fd) == -1 && errno != EBADF && errno != EINVAL) { - int last_status; - atomicGet(server.aof_bio_fsync_status, last_status); - atomicSet(server.aof_bio_fsync_status, C_ERR); - atomicSet(server.aof_bio_fsync_errno, errno); + int last_status = atomic_load_explicit(&server.aof_bio_fsync_status, memory_order_relaxed); + + atomic_store_explicit(&server.aof_bio_fsync_errno, errno, memory_order_relaxed); + atomic_store_explicit(&server.aof_bio_fsync_status, C_ERR, memory_order_release); if (last_status == C_OK) { serverLog(LL_WARNING, "Fail to fsync the AOF file: %s", strerror(errno)); } } else { - atomicSet(server.aof_bio_fsync_status, C_OK); - atomicSet(server.fsynced_reploff_pending, job->fd_args.offset); + atomic_store_explicit(&server.aof_bio_fsync_status, C_OK, memory_order_relaxed); + atomic_store_explicit(&server.fsynced_reploff_pending, job->fd_args.offset, memory_order_relaxed); } if (job->fd_args.need_reclaim_cache) { diff --git a/src/bitops.c b/src/bitops.c index db975e4dfe..2094bb0ea9 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -784,7 +784,7 @@ void bitopCommand(client *c) { addReplyLongLong(c, maxlen); /* Return the output string length in bytes. */ } -/* BITCOUNT key [start end [BIT|BYTE]] */ +/* BITCOUNT key [start [end [BIT|BYTE]]] */ void bitcountCommand(client *c) { robj *o; long long start, end; @@ -795,9 +795,8 @@ void bitcountCommand(client *c) { unsigned char first_byte_neg_mask = 0, last_byte_neg_mask = 0; /* Parse start/end range if any. */ - if (c->argc == 4 || c->argc == 5) { + if (c->argc == 3 || c->argc == 4 || c->argc == 5) { if (getLongLongFromObjectOrReply(c, c->argv[2], &start, NULL) != C_OK) return; - if (getLongLongFromObjectOrReply(c, c->argv[3], &end, NULL) != C_OK) return; if (c->argc == 5) { if (!strcasecmp(c->argv[4]->ptr, "bit")) isbit = 1; @@ -808,6 +807,10 @@ void bitcountCommand(client *c) { return; } } + if (c->argc >= 4) { + if (getLongLongFromObjectOrReply(c, c->argv[3], &end, NULL) != C_OK) return; + } + /* Lookup, check for type. 
*/ o = lookupKeyRead(c->db, c->argv[1]); if (checkType(c, o, OBJ_STRING)) return; @@ -817,6 +820,8 @@ void bitcountCommand(client *c) { /* Make sure we will not overflow */ serverAssert(totlen <= LLONG_MAX >> 3); + if (c->argc < 4) end = totlen - 1; + /* Convert negative indexes */ if (start < 0 && end < 0 && start > end) { addReply(c, shared.czero); @@ -921,12 +926,7 @@ void bitposCommand(client *c) { long long totlen = strlen; serverAssert(totlen <= LLONG_MAX >> 3); - if (c->argc < 5) { - if (isbit) - end = (totlen << 3) + 7; - else - end = totlen - 1; - } + if (c->argc < 5) end = totlen - 1; if (isbit) totlen <<= 3; /* Convert negative indexes */ diff --git a/src/blocked.c b/src/blocked.c index 0291505cb9..6d8d4fbc7c 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -87,7 +87,7 @@ void initClientBlockingState(client *c) { * and will be processed when the client is unblocked. */ void blockClient(client *c, int btype) { /* Master client should never be blocked unless pause or module */ - serverAssert(!(c->flags & CLIENT_MASTER && btype != BLOCKED_MODULE && btype != BLOCKED_POSTPONE)); + serverAssert(!(c->flags & CLIENT_PRIMARY && btype != BLOCKED_MODULE && btype != BLOCKED_POSTPONE)); c->flags |= CLIENT_BLOCKED; c->bstate.btype = btype; @@ -183,8 +183,7 @@ void queueClientForReprocessing(client *c) { void unblockClient(client *c, int queue_for_reprocessing) { if (c->bstate.btype == BLOCKED_LIST || c->bstate.btype == BLOCKED_ZSET || c->bstate.btype == BLOCKED_STREAM) { unblockClientWaitingData(c); - } else if (c->bstate.btype == BLOCKED_WAIT || c->bstate.btype == BLOCKED_WAITAOF || - c->bstate.btype == BLOCKED_WAIT_PREREPL) { + } else if (c->bstate.btype == BLOCKED_WAIT) { unblockClientWaitingReplicas(c); } else if (c->bstate.btype == BLOCKED_MODULE) { if (moduleClientIsBlockedOnKeys(c)) unblockClientWaitingData(c); @@ -200,8 +199,7 @@ void unblockClient(client *c, int queue_for_reprocessing) { /* Reset the client for a new query, unless the client has pending command to process * or in case a shutdown operation was canceled and we are still in the processCommand sequence */ - if (!(c->flags & CLIENT_PENDING_COMMAND) && c->bstate.btype != BLOCKED_SHUTDOWN && - c->bstate.btype != BLOCKED_WAIT_PREREPL) { + if (!(c->flags & CLIENT_PENDING_COMMAND) && c->bstate.btype != BLOCKED_SHUTDOWN) { freeClientOriginalArgv(c); /* Clients that are not blocked on keys are not reprocessed so we must * call reqresAppendResponse here (for clients blocked on key, @@ -211,11 +209,11 @@ void unblockClient(client *c, int queue_for_reprocessing) { resetClient(c); } + /* We count blocked client stats on regular clients and not on module clients */ + if (!(c->flags & CLIENT_MODULE)) server.blocked_clients--; + server.blocked_clients_by_type[c->bstate.btype]--; /* Clear the flags, and put the client in the unblocked list so that * we'll process new commands in its query buffer ASAP. 
*/ - if (!(c->flags & CLIENT_MODULE)) - server.blocked_clients--; /* We count blocked client stats on regular clients and not on module clients */ - server.blocked_clients_by_type[c->bstate.btype]--; c->flags &= ~CLIENT_BLOCKED; c->bstate.btype = BLOCKED_NONE; c->bstate.unblock_on_nokey = 0; @@ -231,15 +229,19 @@ void replyToBlockedClientTimedOut(client *c) { addReplyNullArray(c); updateStatsOnUnblock(c, 0, 0, 0); } else if (c->bstate.btype == BLOCKED_WAIT) { - addReplyLongLong(c, replicationCountAcksByOffset(c->bstate.reploffset)); - } else if (c->bstate.btype == BLOCKED_WAITAOF) { - addReplyArrayLen(c, 2); - addReplyLongLong(c, server.fsynced_reploff >= c->bstate.reploffset); - addReplyLongLong(c, replicationCountAOFAcksByOffset(c->bstate.reploffset)); + if (c->cmd->proc == waitCommand) { + addReplyLongLong(c, replicationCountAcksByOffset(c->bstate.reploffset)); + } else if (c->cmd->proc == waitaofCommand) { + addReplyArrayLen(c, 2); + addReplyLongLong(c, server.fsynced_reploff >= c->bstate.reploffset); + addReplyLongLong(c, replicationCountAOFAcksByOffset(c->bstate.reploffset)); + } else if (c->cmd->proc == clusterCommand) { + addReplyErrorObject(c, shared.noreplicaserr); + } else { + serverPanic("Unknown wait command %s in replyToBlockedClientTimedOut().", c->cmd->declared_name); + } } else if (c->bstate.btype == BLOCKED_MODULE) { moduleBlockedClientTimedOut(c, 0); - } else if (c->bstate.btype == BLOCKED_WAIT_PREREPL) { - addReplyErrorObject(c, shared.noreplicaserr); } else { serverPanic("Unknown btype in replyToBlockedClientTimedOut()."); } @@ -263,8 +265,8 @@ void replyToClientsBlockedOnShutdown(void) { /* Mass-unblock clients because something changed in the instance that makes * blocking no longer safe. For example clients blocked in list operations - * in an instance which turns from master to slave is unsafe, so this function - * is called when a master turns into a slave. + * in an instance which turns from master to replica is unsafe, so this function + * is called when a master turns into a replica. * * The semantics is to send an -UNBLOCKED error to the client, disconnecting * it at the same time. */ @@ -585,29 +587,13 @@ static void handleClientsBlockedOnKey(readyList *rl) { } /* block a client for replica acknowledgement */ -void blockClientForReplicaAck(client *c, mstime_t timeout, long long offset, long numreplicas, int btype, int numlocal) { +void blockClientForReplicaAck(client *c, mstime_t timeout, long long offset, long numreplicas, int numlocal) { c->bstate.timeout = timeout; c->bstate.reploffset = offset; c->bstate.numreplicas = numreplicas; c->bstate.numlocal = numlocal; listAddNodeHead(server.clients_waiting_acks, c); - blockClient(c, btype); -} - -/* block a client due to pre-replication */ -void blockForPreReplication(client *c, mstime_t timeout, long long offset, long numreplicas) { - blockClientForReplicaAck(c, timeout, offset, numreplicas, BLOCKED_WAIT_PREREPL, 0); - c->flags |= CLIENT_PENDING_COMMAND; -} - -/* block a client due to wait command */ -void blockForReplication(client *c, mstime_t timeout, long long offset, long numreplicas) { - blockClientForReplicaAck(c, timeout, offset, numreplicas, BLOCKED_WAIT, 0); -} - -/* block a client due to waitaof command */ -void blockForAofFsync(client *c, mstime_t timeout, long long offset, int numlocal, long numreplicas) { - blockClientForReplicaAck(c, timeout, offset, numreplicas, BLOCKED_WAITAOF, numlocal); + blockClient(c, BLOCKED_WAIT); } /* Postpone client from executing a command. 
For example the server might be busy diff --git a/src/cluster.c b/src/cluster.c index d30d7e19b5..3a4dccdff5 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -900,7 +900,6 @@ void clusterCommand(client *c) { } kvstoreReleaseDictIterator(kvs_di); } else if ((!strcasecmp(c->argv[1]->ptr, "slaves") || !strcasecmp(c->argv[1]->ptr, "replicas")) && c->argc == 3) { - /* CLUSTER SLAVES */ /* CLUSTER REPLICAS */ clusterNode *n = clusterLookupNode(c->argv[2]->ptr, sdslen(c->argv[2]->ptr)); int j; @@ -911,15 +910,15 @@ void clusterCommand(client *c) { return; } - if (clusterNodeIsSlave(n)) { + if (clusterNodeIsReplica(n)) { addReplyError(c, "The specified node is not a master"); return; } /* Report TLS ports to TLS client, and report non-TLS port to non-TLS client. */ - addReplyArrayLen(c, clusterNodeNumSlaves(n)); - for (j = 0; j < clusterNodeNumSlaves(n); j++) { - sds ni = clusterGenNodeDescription(c, clusterNodeGetSlave(n, j), shouldReturnTlsInfo()); + addReplyArrayLen(c, clusterNodeNumReplicas(n)); + for (j = 0; j < clusterNodeNumReplicas(n); j++) { + sds ni = clusterGenNodeDescription(c, clusterNodeGetReplica(n, j), shouldReturnTlsInfo()); addReplyBulkCString(c, ni); sdsfree(ni); } @@ -1048,10 +1047,12 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int * can safely serve the request, otherwise we return a TRYAGAIN * error). To do so we set the importing/migrating state and * increment a counter for every missing key. */ - if (n == myself && getMigratingSlotDest(slot) != NULL) { - migrating_slot = 1; - } else if (getImportingSlotSource(slot) != NULL) { - importing_slot = 1; + if (clusterNodeIsPrimary(myself) || c->flags & CLIENT_READONLY) { + if (n == clusterNodeGetPrimary(myself) && getMigratingSlotDest(slot) != NULL) { + migrating_slot = 1; + } else if (getImportingSlotSource(slot) != NULL) { + importing_slot = 1; + } } } else { /* If it is not the first key/channel, make sure it is exactly @@ -1120,7 +1121,9 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int /* MIGRATE always works in the context of the local node if the slot * is open (migrating or importing state). We need to be able to freely * move keys among instances in this case. */ - if ((migrating_slot || importing_slot) && cmd->proc == migrateCommand) return myself; + if ((migrating_slot || importing_slot) && cmd->proc == migrateCommand && clusterNodeIsPrimary(myself)) { + return myself; + } /* If we don't have all the keys and we are migrating the slot, send * an ASK redirection or TRYAGAIN. */ @@ -1148,13 +1151,13 @@ getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, int argc, int } } - /* Handle the read-only client case reading from a slave: if this - * node is a slave and the request is about a hash slot our master + /* Handle the read-only client case reading from a replica: if this + * node is a replica and the request is about a hash slot our primary * is serving, we can reply without redirection. 
*/ int is_write_command = (cmd_flags & CMD_WRITE) || (c->cmd->proc == execCommand && (c->mstate.cmd_flags & CMD_WRITE)); - if (((c->flags & CLIENT_READONLY) || pubsubshard_included) && !is_write_command && clusterNodeIsSlave(myself) && - clusterNodeGetMaster(myself) == n) { + if (((c->flags & CLIENT_READONLY) || pubsubshard_included) && !is_write_command && clusterNodeIsReplica(myself) && + clusterNodeGetPrimary(myself) == n) { return myself; } @@ -1200,7 +1203,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co * to detect timeouts, in order to handle the following case: * * 1) A client blocks with BLPOP or similar blocking operation. - * 2) The master migrates the hash slot elsewhere or turns into a slave. + * 2) The primary migrates the hash slot elsewhere or turns into a replica. * 3) The client may remain blocked forever (or up to the max timeout time) * waiting for a key change that will never happen. * @@ -1236,8 +1239,8 @@ int clusterRedirectBlockedClientIfNeeded(client *c) { /* if the client is read-only and attempting to access key that our * replica can handle, allow it. */ - if ((c->flags & CLIENT_READONLY) && !(c->lastcmd->flags & CMD_WRITE) && clusterNodeIsSlave(myself) && - clusterNodeGetMaster(myself) == node) { + if ((c->flags & CLIENT_READONLY) && !(c->lastcmd->flags & CMD_WRITE) && clusterNodeIsReplica(myself) && + clusterNodeGetPrimary(myself) == node) { node = myself; } @@ -1327,9 +1330,9 @@ int isNodeAvailable(clusterNode *node) { } void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, int end_slot) { - int i, nested_elements = 3; /* slots (2) + master addr (1) */ - for (i = 0; i < clusterNodeNumSlaves(node); i++) { - if (!isNodeAvailable(clusterNodeGetSlave(node, i))) continue; + int i, nested_elements = 3; /* slots (2) + primary addr (1) */ + for (i = 0; i < clusterNodeNumReplicas(node); i++) { + if (!isNodeAvailable(clusterNodeGetReplica(node, i))) continue; nested_elements++; } addReplyArrayLen(c, nested_elements); @@ -1338,11 +1341,11 @@ void addNodeReplyForClusterSlot(client *c, clusterNode *node, int start_slot, in addNodeToNodeReply(c, node); /* Remaining nodes in reply are replicas for slot range */ - for (i = 0; i < clusterNodeNumSlaves(node); i++) { + for (i = 0; i < clusterNodeNumReplicas(node); i++) { /* This loop is copy/pasted from clusterGenNodeDescription() * with modifications for per-slot node aggregation. */ - if (!isNodeAvailable(clusterNodeGetSlave(node, i))) continue; - addNodeToNodeReply(c, clusterNodeGetSlave(node, i)); + if (!isNodeAvailable(clusterNodeGetReplica(node, i))) continue; + addNodeToNodeReply(c, clusterNodeGetReplica(node, i)); nested_elements--; } serverAssert(nested_elements == 3); /* Original 3 elements */ @@ -1360,7 +1363,7 @@ void clearCachedClusterSlotsResponse(void) { sds generateClusterSlotResponse(void) { client *recording_client = createCachedResponseClient(); clusterNode *n = NULL; - int num_masters = 0, start = -1; + int num_primaries = 0, start = -1; void *slot_replylen = addReplyDeferredLen(recording_client); for (int i = 0; i <= CLUSTER_SLOTS; i++) { @@ -1376,13 +1379,13 @@ sds generateClusterSlotResponse(void) { * or end of slot. 
*/ if (i == CLUSTER_SLOTS || n != getNodeBySlot(i)) { addNodeReplyForClusterSlot(recording_client, n, start, i - 1); - num_masters++; + num_primaries++; if (i == CLUSTER_SLOTS) break; n = getNodeBySlot(i); start = i; } } - setDeferredArrayLen(recording_client, slot_replylen, num_masters); + setDeferredArrayLen(recording_client, slot_replylen, num_primaries); sds cluster_slot_response = aggregateClientOutputBuffer(recording_client); deleteCachedResponseClient(recording_client); return cluster_slot_response; @@ -1401,8 +1404,8 @@ int verifyCachedClusterSlotsResponse(sds cached_response) { void clusterCommandSlots(client *c) { /* Format: 1) 1) start slot * 2) end slot - * 3) 1) master IP - * 2) master port + * 3) 1) primary IP + * 2) primary port * 3) node ID * 4) 1) replica IP * 2) replica port @@ -1442,8 +1445,8 @@ void askingCommand(client *c) { } /* The READONLY command is used by clients to enter the read-only mode. - * In this mode slaves will not redirect clients as long as clients access - * with read-only commands to keys that are served by the slave's master. */ + * In this mode replica will not redirect clients as long as clients access + * with read-only commands to keys that are served by the replica's primary. */ void readonlyCommand(client *c) { if (server.cluster_enabled == 0) { addReplyError(c, "This instance has cluster support disabled"); diff --git a/src/cluster.h b/src/cluster.h index de58486440..f163e7f688 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -67,7 +67,7 @@ int clusterCommandSpecial(client *c); const char **clusterCommandExtendedHelp(void); int clusterAllowFailoverCmd(client *c); -void clusterPromoteSelfToMaster(void); +void clusterPromoteSelfToPrimary(void); int clusterManualFailoverTimeLimit(void); void clusterCommandSlots(client *c); @@ -83,18 +83,18 @@ int getClusterSize(void); int getMyShardSlotCount(void); int handleDebugClusterCommand(client *c); int clusterNodePending(clusterNode *node); -int clusterNodeIsMaster(clusterNode *n); +int clusterNodeIsPrimary(clusterNode *n); char **getClusterNodesList(size_t *numnodes); char *clusterNodeIp(clusterNode *node); -int clusterNodeIsSlave(clusterNode *node); -clusterNode *clusterNodeGetMaster(clusterNode *node); +int clusterNodeIsReplica(clusterNode *node); +clusterNode *clusterNodeGetPrimary(clusterNode *node); char *clusterNodeGetName(clusterNode *node); int clusterNodeTimedOut(clusterNode *node); int clusterNodeIsFailing(clusterNode *node); int clusterNodeIsNoFailover(clusterNode *node); char *clusterNodeGetShardId(clusterNode *node); -int clusterNodeNumSlaves(clusterNode *node); -clusterNode *clusterNodeGetSlave(clusterNode *node, int slave_idx); +int clusterNodeNumReplicas(clusterNode *node); +clusterNode *clusterNodeGetReplica(clusterNode *node, int slave_idx); clusterNode *getMigratingSlotDest(int slot); clusterNode *getImportingSlotSource(int slot); clusterNode *getNodeBySlot(int slot); @@ -103,7 +103,6 @@ char *clusterNodeHostname(clusterNode *node); const char *clusterNodePreferredEndpoint(clusterNode *n); long long clusterNodeReplOffset(clusterNode *node); clusterNode *clusterLookupNode(const char *name, int length); -void clusterReplicateOpenSlots(void); int detectAndUpdateCachedNodeHealth(void); client *createCachedResponseClient(void); void deleteCachedResponseClient(client *recording_client); diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 0822429934..cd3786fe05 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -63,14 +63,14 @@ void 
clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request); void clusterUpdateState(void); int clusterNodeCoversSlot(clusterNode *n, int slot); list *clusterGetNodesInMyShard(clusterNode *node); -int clusterNodeAddSlave(clusterNode *master, clusterNode *slave); +int clusterNodeAddReplica(clusterNode *primary, clusterNode *replica); int clusterAddSlot(clusterNode *n, int slot); int clusterDelSlot(int slot); int clusterDelNodeSlots(clusterNode *node); int clusterNodeSetSlotBit(clusterNode *n, int slot); -void clusterSetMaster(clusterNode *n, int closeSlots); -void clusterHandleSlaveFailover(void); -void clusterHandleSlaveMigration(int max_slaves); +void clusterSetPrimary(clusterNode *n, int closeSlots); +void clusterHandleReplicaFailover(void); +void clusterHandleReplicaMigration(int max_replicas); int bitmapTestBit(unsigned char *bitmap, int pos); void bitmapSetBit(unsigned char *bitmap, int pos); void bitmapClearBit(unsigned char *bitmap, int pos); @@ -78,7 +78,7 @@ void clusterDoBeforeSleep(int flags); void clusterSendUpdate(clusterLink *link, clusterNode *node); void resetManualFailover(void); void clusterCloseAllSlots(void); -void clusterSetNodeAsMaster(clusterNode *n); +void clusterSetNodeAsPrimary(clusterNode *n); void clusterDelNode(clusterNode *delnode); sds representClusterNodeFlags(sds ci, uint16_t flags); sds representSlotInfo(sds ci, uint16_t *slot_info_pairs, int slot_info_pairs_count); @@ -113,6 +113,8 @@ int auxTlsPortPresent(clusterNode *n); static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen); void freeClusterLink(clusterLink *link); int verifyClusterNodeId(const char *name, int length); +sds clusterEncodeOpenSlotsAuxField(int rdbflags); +int clusterDecodeOpenSlotsAuxField(int rdbflags, sds s); int getNodeDefaultClientPort(clusterNode *n) { return server.tls_cluster ? 
n->tls_port : n->tcp_port; @@ -144,7 +146,6 @@ static inline int defaultClientPort(void) { dictType clusterNodesDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -157,7 +158,6 @@ dictType clusterNodesDictType = { dictType clusterNodesBlackListDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -168,7 +168,6 @@ dictType clusterNodesBlackListDictType = { dictType clusterSdsToListType = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor, /* val destructor */ @@ -227,8 +226,8 @@ int auxShardIdSetter(clusterNode *n, void *value, int length) { memcpy(n->shard_id, value, CLUSTER_NAMELEN); /* if n already has replicas, make sure they all agree * on the shard id */ - for (int i = 0; i < n->numslaves; i++) { - if (memcmp(n->slaves[i]->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { + for (int i = 0; i < n->num_replicas; i++) { + if (memcmp(n->replicas[i]->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { return C_ERR; } } @@ -361,7 +360,7 @@ int clusterLoadConfig(char *filename) { while (fgets(line, maxline, fp) != NULL) { int argc, aux_argc; sds *argv, *aux_argv; - clusterNode *n, *master; + clusterNode *n, *primary; char *p, *s; /* Skip blank lines, they can be created either by users manually @@ -529,9 +528,9 @@ int clusterLoadConfig(char *filename) { myself = server.cluster->myself = n; n->flags |= CLUSTER_NODE_MYSELF; } else if (!strcasecmp(s, "master")) { - n->flags |= CLUSTER_NODE_MASTER; + n->flags |= CLUSTER_NODE_PRIMARY; } else if (!strcasecmp(s, "slave")) { - n->flags |= CLUSTER_NODE_SLAVE; + n->flags |= CLUSTER_NODE_REPLICA; } else if (!strcasecmp(s, "fail?")) { n->flags |= CLUSTER_NODE_PFAIL; } else if (!strcasecmp(s, "fail")) { @@ -546,37 +545,37 @@ int clusterLoadConfig(char *filename) { } else if (!strcasecmp(s, "noflags")) { /* nothing to do */ } else { - serverPanic("Unknown flag in redis cluster config file"); + serverPanic("Unknown flag in %s cluster config file", SERVER_TITLE); } if (p) s = p + 1; } - /* Get master if any. Set the master and populate master's - * slave list. */ + /* Get primary if any. Set the primary and populate primary's + * replica list. 
*/ if (argv[3][0] != '-') { if (verifyClusterNodeId(argv[3], sdslen(argv[3])) == C_ERR) { sdsfreesplitres(argv, argc); goto fmterr; } - master = clusterLookupNode(argv[3], sdslen(argv[3])); - if (!master) { - master = createClusterNode(argv[3], 0); - clusterAddNode(master); + primary = clusterLookupNode(argv[3], sdslen(argv[3])); + if (!primary) { + primary = createClusterNode(argv[3], 0); + clusterAddNode(primary); } /* shard_id can be absent if we are loading a nodes.conf generated - * by an older version of Redis; we should follow the primary's + * by an older version; we should follow the primary's * shard_id in this case */ if (auxFieldHandlers[af_shard_id].isPresent(n) == 0) { - memcpy(n->shard_id, master->shard_id, CLUSTER_NAMELEN); - clusterAddNodeToShard(master->shard_id, n); - } else if (clusterGetNodesInMyShard(master) != NULL && - memcmp(master->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { + memcpy(n->shard_id, primary->shard_id, CLUSTER_NAMELEN); + clusterAddNodeToShard(primary->shard_id, n); + } else if (clusterGetNodesInMyShard(primary) != NULL && + memcmp(primary->shard_id, n->shard_id, CLUSTER_NAMELEN) != 0) { /* If the primary has been added to a shard, make sure this * node has the same persisted shard id as the primary. */ goto fmterr; } - n->slaveof = master; - clusterNodeAddSlave(master, n); + n->replicaof = primary; + clusterNodeAddReplica(primary, n); } else if (auxFieldHandlers[af_shard_id].isPresent(n) == 0) { /* n is a primary but it does not have a persisted shard_id. * This happens if we are loading a nodes.conf generated by @@ -592,7 +591,7 @@ int clusterLoadConfig(char *filename) { /* Set configEpoch for this node. * If the node is a replica, set its config epoch to 0. * If it's a primary, load the config epoch from the configuration file. */ - n->configEpoch = (nodeIsSlave(n) && n->slaveof) ? 0 : strtoull(argv[6], NULL, 10); + n->configEpoch = (nodeIsReplica(n) && n->replicaof) ? 0 : strtoull(argv[6], NULL, 10); /* Populate hash slots served by this instance. */ for (j = 8; j < argc; j++) { @@ -752,7 +751,6 @@ void clusterSaveConfigOrDie(int do_fsync) { serverLog(LL_WARNING, "Fatal: can't update cluster config file."); exit(1); } - clearCachedClusterSlotsResponse(); } /* Lock the cluster config using flock(), and retain the file descriptor used to @@ -832,7 +830,7 @@ void deriveAnnouncedPorts(int *announced_tcp_port, int *announced_tls_port, int void clusterUpdateMyselfFlags(void) { if (!myself) return; int oldflags = myself->flags; - int nofailover = server.cluster_slave_no_failover ? CLUSTER_NODE_NOFAILOVER : 0; + int nofailover = server.cluster_replica_no_failover ? CLUSTER_NODE_NOFAILOVER : 0; myself->flags &= ~CLUSTER_NODE_NOFAILOVER; myself->flags |= nofailover; if (myself->flags != oldflags) { @@ -920,7 +918,7 @@ static void updateShardId(clusterNode *node, const char *shard_id) { clusterAddNodeToShard(shard_id, node); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } - if (shard_id && myself != node && myself->slaveof == node) { + if (shard_id && myself != node && myself->replicaof == node) { if (memcmp(myself->shard_id, shard_id, CLUSTER_NAMELEN) != 0) { /* shard-id can diverge right after a rolling upgrade * from pre-7.2 releases */ @@ -937,7 +935,7 @@ static inline int areInSameShard(clusterNode *node1, clusterNode *node2) { } static inline uint64_t nodeEpoch(clusterNode *n) { - return n->slaveof ? n->slaveof->configEpoch : n->configEpoch; + return n->replicaof ? 
n->replicaof->configEpoch : n->configEpoch; } /* Update my hostname based on server configuration values */ @@ -992,7 +990,7 @@ void clusterInit(void) { if (clusterLoadConfig(server.cluster_configfile) == C_ERR) { /* No configuration found. We will just use the random name provided * by the createClusterNode() function. */ - myself = server.cluster->myself = createClusterNode(NULL, CLUSTER_NODE_MYSELF | CLUSTER_NODE_MASTER); + myself = server.cluster->myself = createClusterNode(NULL, CLUSTER_NODE_MYSELF | CLUSTER_NODE_PRIMARY); serverLog(LL_NOTICE, "No cluster configuration found, I'm %.40s", myself->name); clusterAddNode(myself); clusterAddNodeToShard(myself->shard_id, myself); @@ -1018,12 +1016,16 @@ void clusterInit(void) { exit(1); } + /* Register our own rdb aux fields */ + serverAssert(rdbRegisterAuxField("cluster-slot-states", clusterEncodeOpenSlotsAuxField, + clusterDecodeOpenSlotsAuxField) == C_OK); + /* Set myself->port/cport/pport to my listening ports, we'll just need to * discover the IP address via MEET messages. */ deriveAnnouncedPorts(&myself->tcp_port, &myself->tls_port, &myself->cport); server.cluster->mf_end = 0; - server.cluster->mf_slave = NULL; + server.cluster->mf_replica = NULL; for (connTypeForCaching conn_type = CACHE_CONN_TCP; conn_type < CACHE_CONN_TYPE_MAX; conn_type++) { server.cached_cluster_slot_info[conn_type] = NULL; } @@ -1063,20 +1065,20 @@ void clusterInitLast(void) { * * 1) All other nodes are forgotten. * 2) All the assigned / open slots are released. - * 3) If the node is a slave, it turns into a master. + * 3) If the node is a replica, it turns into a primary. * 4) Only for hard reset: a new Node ID is generated. * 5) Only for hard reset: currentEpoch and configEpoch are set to 0. * 6) The new configuration is saved and the cluster state updated. - * 7) If the node was a slave, the whole data set is flushed away. */ + * 7) If the node was a replica, the whole data set is flushed away. */ void clusterReset(int hard) { dictIterator *di; dictEntry *de; int j; - /* Turn into master. */ - if (nodeIsSlave(myself)) { - clusterSetNodeAsMaster(myself); - replicationUnsetMaster(); + /* Turn into primary. */ + if (nodeIsReplica(myself)) { + clusterSetNodeAsPrimary(myself); + replicationUnsetPrimary(); emptyData(-1, EMPTYDB_NO_FLAGS, NULL); } @@ -1251,7 +1253,7 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* If the server is starting up, don't accept cluster connections: * UPDATE messages may interact with the database content. 
*/ - if (server.masterhost == NULL && server.loading) return; + if (server.primary_host == NULL && server.loading) return; while (max--) { cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); @@ -1322,9 +1324,9 @@ clusterNode *createClusterNode(char *nodename, int flags) { node->slot_info_pairs = NULL; node->slot_info_pairs_count = 0; node->numslots = 0; - node->numslaves = 0; - node->slaves = NULL; - node->slaveof = NULL; + node->num_replicas = 0; + node->replicas = NULL; + node->replicaof = NULL; node->last_in_ping_gossip = 0; node->ping_sent = node->pong_received = 0; node->data_received = 0; @@ -1445,43 +1447,44 @@ static int clusterNodeNameComparator(const void *node1, const void *node2) { return strncasecmp((*(clusterNode **)node1)->name, (*(clusterNode **)node2)->name, CLUSTER_NAMELEN); } -int clusterNodeRemoveSlave(clusterNode *master, clusterNode *slave) { +int clusterNodeRemoveReplica(clusterNode *primary, clusterNode *replica) { int j; - for (j = 0; j < master->numslaves; j++) { - if (master->slaves[j] == slave) { - if ((j + 1) < master->numslaves) { - int remaining_slaves = (master->numslaves - j) - 1; - memmove(master->slaves + j, master->slaves + (j + 1), (sizeof(*master->slaves) * remaining_slaves)); + for (j = 0; j < primary->num_replicas; j++) { + if (primary->replicas[j] == replica) { + if ((j + 1) < primary->num_replicas) { + int remaining_replicas = (primary->num_replicas - j) - 1; + memmove(primary->replicas + j, primary->replicas + (j + 1), + (sizeof(*primary->replicas) * remaining_replicas)); } - master->numslaves--; - if (master->numslaves == 0) master->flags &= ~CLUSTER_NODE_MIGRATE_TO; + primary->num_replicas--; + if (primary->num_replicas == 0) primary->flags &= ~CLUSTER_NODE_MIGRATE_TO; return C_OK; } } return C_ERR; } -int clusterNodeAddSlave(clusterNode *master, clusterNode *slave) { +int clusterNodeAddReplica(clusterNode *primary, clusterNode *replica) { int j; - /* If it's already a slave, don't add it again. */ - for (j = 0; j < master->numslaves; j++) - if (master->slaves[j] == slave) return C_ERR; - master->slaves = zrealloc(master->slaves, sizeof(clusterNode *) * (master->numslaves + 1)); - master->slaves[master->numslaves] = slave; - master->numslaves++; - qsort(master->slaves, master->numslaves, sizeof(clusterNode *), clusterNodeNameComparator); - master->flags |= CLUSTER_NODE_MIGRATE_TO; + /* If it's already a replica, don't add it again. */ + for (j = 0; j < primary->num_replicas; j++) + if (primary->replicas[j] == replica) return C_ERR; + primary->replicas = zrealloc(primary->replicas, sizeof(clusterNode *) * (primary->num_replicas + 1)); + primary->replicas[primary->num_replicas] = replica; + primary->num_replicas++; + qsort(primary->replicas, primary->num_replicas, sizeof(clusterNode *), clusterNodeNameComparator); + primary->flags |= CLUSTER_NODE_MIGRATE_TO; return C_OK; } -int clusterCountNonFailingSlaves(clusterNode *n) { - int j, okslaves = 0; +int clusterCountNonFailingReplicas(clusterNode *n) { + int j, ok_replicas = 0; - for (j = 0; j < n->numslaves; j++) - if (!nodeFailed(n->slaves[j])) okslaves++; - return okslaves; + for (j = 0; j < n->num_replicas; j++) + if (!nodeFailed(n->replicas[j])) ok_replicas++; + return ok_replicas; } /* Low level cleanup of the node structure. Only called by clusterDelNode(). */ @@ -1489,12 +1492,12 @@ void freeClusterNode(clusterNode *n) { sds nodename; int j; - /* If the node has associated slaves, we have to set - * all the slaves->slaveof fields to NULL (unknown). 
*/ - for (j = 0; j < n->numslaves; j++) n->slaves[j]->slaveof = NULL; + /* If the node has associated replicas, we have to set + * all the replicas->replicaof fields to NULL (unknown). */ + for (j = 0; j < n->num_replicas; j++) n->replicas[j]->replicaof = NULL; - /* Remove this node from the list of slaves of its master. */ - if (nodeIsSlave(n) && n->slaveof) clusterNodeRemoveSlave(n->slaveof, n); + /* Remove this node from the list of replicas of its primary. */ + if (nodeIsReplica(n) && n->replicaof) clusterNodeRemoveReplica(n->replicaof, n); /* Unlink from the set of nodes. */ nodename = sdsnewlen(n->name, CLUSTER_NAMELEN); @@ -1507,7 +1510,7 @@ void freeClusterNode(clusterNode *n) { if (n->link) freeClusterLink(n->link); if (n->inbound_link) freeClusterLink(n->inbound_link); listRelease(n->fail_reports); - zfree(n->slaves); + zfree(n->replicas); zfree(n); } @@ -1528,8 +1531,8 @@ void clusterAddNode(clusterNode *node) { * other nodes. * 3) Remove the node from the owning shard * 4) Free the node with freeClusterNode() that will in turn remove it - * from the hash table and from the list of slaves of its master, if - * it is a slave node. + * from the hash table and from the list of replicas of its primary, if + * it is a replica node. */ void clusterDelNode(clusterNode *delnode) { int j; @@ -1572,7 +1575,7 @@ clusterNode *clusterLookupNode(const char *name, int length) { /* Get all the nodes in my shard. * Note that the list returned is not computed on the fly - * via slaveof; rather, it is maintained permanently to + * via replicaof; rather, it is maintained permanently to * track the shard membership and its life cycle is tied * to this process. Therefore, the caller must not * release the list. */ @@ -1672,8 +1675,8 @@ uint64_t clusterGetMaxEpoch(void) { * * 1) When slots are closed after importing. Otherwise resharding would be * too expensive. - * 2) When CLUSTER FAILOVER is called with options that force a slave to - * failover its master even if there is not master majority able to + * 2) When CLUSTER FAILOVER is called with options that force a replica to + * failover its primary even if there is not primary majority able to * create a new configuration epoch. * * The cluster will not explode using this function, even in the case of @@ -1696,14 +1699,14 @@ int clusterBumpConfigEpochWithoutConsensus(void) { } } -/* This function is called when this node is a master, and we receive from - * another master a configuration epoch that is equal to our configuration +/* This function is called when this node is a primary, and we receive from + * another primary a configuration epoch that is equal to our configuration * epoch. * * BACKGROUND * - * It is not possible that different slaves get the same config - * epoch during a failover election, because the slaves need to get voted + * It is not possible that different replicas get the same config + * epoch during a failover election, because the replicas need to get voted * by a majority. However when we perform a manual resharding of the cluster * the node will assign a configuration epoch to itself without to ask * for agreement. Usually resharding happens when the cluster is working well @@ -1722,13 +1725,13 @@ int clusterBumpConfigEpochWithoutConsensus(void) { * end with a different configEpoch at startup automatically. * * In all the cases, we want a mechanism that resolves this issue automatically - * as a safeguard. The same configuration epoch for masters serving different + * as a safeguard. 
The same configuration epoch for primaries serving different * set of slots is not harmful, but it is if the nodes end serving the same * slots for some reason (manual errors or software bugs) without a proper * failover procedure. * * In general we want a system that eventually always ends with different - * masters having different configuration epochs whatever happened, since + * primaries having different configuration epochs whatever happened, since * nothing is worse than a split-brain condition in a distributed system. * * BEHAVIOR @@ -1743,8 +1746,8 @@ int clusterBumpConfigEpochWithoutConsensus(void) { * end with a different configuration epoch. */ void clusterHandleConfigEpochCollision(clusterNode *sender) { - /* Prerequisites: nodes have the same configEpoch and are both masters. */ - if (sender->configEpoch != myself->configEpoch || !clusterNodeIsMaster(sender) || !clusterNodeIsMaster(myself)) + /* Prerequisites: nodes have the same configEpoch and are both primaries. */ + if (sender->configEpoch != myself->configEpoch || !clusterNodeIsPrimary(sender) || !clusterNodeIsPrimary(myself)) return; /* Don't act if the colliding node has a smaller Node ID. */ if (memcmp(sender->name, myself->name, CLUSTER_NAMELEN) <= 0) return; @@ -1838,8 +1841,8 @@ int clusterBlacklistExists(char *nodeid) { /* This function checks if a given node should be marked as FAIL. * It happens if the following conditions are met: * - * 1) We received enough failure reports from other master nodes via gossip. - * Enough means that the majority of the masters signaled the node is + * 1) We received enough failure reports from other primary nodes via gossip. + * Enough means that the majority of the primaries signaled the node is * down recently. * 2) We believe this node is in PFAIL state. * @@ -1847,13 +1850,13 @@ int clusterBlacklistExists(char *nodeid) { * event trying to force every other node to set the FAIL flag for the node. * * Note that the form of agreement used here is weak, as we collect the majority - * of masters state during some time, and even if we force agreement by + * of primaries state during some time, and even if we force agreement by * propagating the FAIL message, because of partitions we may not reach every * node. However: * * 1) Either we reach the majority and eventually the FAIL state will propagate * to all the cluster. - * 2) Or there is no majority so no slave promotion will be authorized and the + * 2) Or there is no majority so no replica promotion will be authorized and the * FAIL flag will be cleared after some time. */ void markNodeAsFailingIfNeeded(clusterNode *node) { @@ -1864,9 +1867,9 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { if (nodeFailed(node)) return; /* Already FAILing. */ failures = clusterNodeFailureReportsCount(node); - /* Also count myself as a voter if I'm a master. */ - if (clusterNodeIsMaster(myself)) failures++; - if (failures < needed_quorum) return; /* No weak agreement from masters. */ + /* Also count myself as a voter if I'm a primary. */ + if (clusterNodeIsPrimary(myself)) failures++; + if (failures < needed_quorum) return; /* No weak agreement from primaries. */ serverLog(LL_NOTICE, "Marking node %.40s (%s) as failing (quorum reached).", node->name, node->human_nodename); @@ -1877,8 +1880,8 @@ void markNodeAsFailingIfNeeded(clusterNode *node) { /* Broadcast the failing node name to everybody, forcing all the other * reachable nodes to flag the node as FAIL. 
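The PFAIL-to-FAIL promotion above boils down to a strict-majority check over failure reports from primaries. A minimal standalone sketch of that arithmetic, assuming hypothetical inputs (cluster_size stands in for server.cluster->size, report_count for clusterNodeFailureReportsCount(), and i_am_primary for clusterNodeIsPrimary(myself)); this is an illustration, not the patch's code:

#include <stdbool.h>

/* Sketch: should a node we see as PFAIL be promoted to FAIL?
 * cluster_size - number of primaries serving at least one slot
 * report_count - failure reports collected from other primaries
 * i_am_primary - whether the local node may add its own vote */
static bool should_mark_fail(int cluster_size, int report_count, bool i_am_primary) {
    int needed_quorum = (cluster_size / 2) + 1; /* strict majority of primaries */
    int failures = report_count;
    if (i_am_primary) failures++; /* count ourselves, as the code above does */
    return failures >= needed_quorum;
}

/* Example: with 5 primaries the quorum is 3, so two reports plus our own
 * PFAIL observation are enough: should_mark_fail(5, 2, true) == true. */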
- * We do that even if this node is a replica and not a master: anyway - * the failing state is triggered collecting failure reports from masters, + * We do that even if this node is a replica and not a primary: anyway + * the failing state is triggered collecting failure reports from primaries, * so here the replica is only helping propagating this status. */ clusterSendFail(node->name); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); @@ -1892,20 +1895,20 @@ void clearNodeFailureIfNeeded(clusterNode *node) { serverAssert(nodeFailed(node)); - /* For slaves we always clear the FAIL flag if we can contact the + /* For replicas we always clear the FAIL flag if we can contact the * node again. */ - if (nodeIsSlave(node) || node->numslots == 0) { + if (nodeIsReplica(node) || node->numslots == 0) { serverLog(LL_NOTICE, "Clear FAIL state for node %.40s (%s):%s is reachable again.", node->name, - node->human_nodename, nodeIsSlave(node) ? "replica" : "master without slots"); + node->human_nodename, nodeIsReplica(node) ? "replica" : "master without slots"); node->flags &= ~CLUSTER_NODE_FAIL; clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); } - /* If it is a master and... + /* If it is a primary and... * 1) The FAIL state is old enough. * 2) It is yet serving slots from our point of view (not failed over). * Apparently no one is going to fix these slots, clear the FAIL flag. */ - if (clusterNodeIsMaster(node) && node->numslots > 0 && + if (clusterNodeIsPrimary(node) && node->numslots > 0 && (now - node->fail_time) > (server.cluster_node_timeout * CLUSTER_FAIL_UNDO_TIME_MULT)) { serverLog( LL_NOTICE, @@ -2087,8 +2090,8 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { /* Ignore gossips about self. */ if (node && node != myself) { /* We already know this node. - Handle failure reports, only when the sender is a master. */ - if (sender && clusterNodeIsMaster(sender)) { + Handle failure reports, only when the sender is a primary. */ + if (sender && clusterNodeIsPrimary(sender)) { if (flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) { if (clusterNodeAddFailureReport(node, sender)) { serverLog(LL_VERBOSE, "Node %.40s (%s) reported node %.40s (%s) as not reachable.", @@ -2228,32 +2231,32 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, clusterMsg * serverLog(LL_NOTICE, "Address updated for node %.40s (%s), now %s:%d", node->name, node->human_nodename, node->ip, getNodeDefaultClientPort(node)); - /* Check if this is our master and we have to change the + /* Check if this is our primary and we have to change the * replication target as well. */ - if (nodeIsSlave(myself) && myself->slaveof == node) - replicationSetMaster(node->ip, getNodeDefaultReplicationPort(node)); + if (nodeIsReplica(myself) && myself->replicaof == node) + replicationSetPrimary(node->ip, getNodeDefaultReplicationPort(node)); return 1; } -/* Reconfigure the specified node 'n' as a master. This function is called when - * a node that we believed to be a slave is now acting as master in order to +/* Reconfigure the specified node 'n' as a primary. This function is called when + * a node that we believed to be a replica is now acting as primary in order to * update the state of the node. 
*/ -void clusterSetNodeAsMaster(clusterNode *n) { - if (clusterNodeIsMaster(n)) return; +void clusterSetNodeAsPrimary(clusterNode *n) { + if (clusterNodeIsPrimary(n)) return; - if (n->slaveof) { - clusterNodeRemoveSlave(n->slaveof, n); + if (n->replicaof) { + clusterNodeRemoveReplica(n->replicaof, n); if (n != myself) n->flags |= CLUSTER_NODE_MIGRATE_TO; } - n->flags &= ~CLUSTER_NODE_SLAVE; - n->flags |= CLUSTER_NODE_MASTER; - n->slaveof = NULL; + n->flags &= ~CLUSTER_NODE_REPLICA; + n->flags |= CLUSTER_NODE_PRIMARY; + n->replicaof = NULL; /* Update config and state. */ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE); } -/* This function is called when we receive a master configuration via a +/* This function is called when we receive a primary configuration via a * PING, PONG or UPDATE packet. What we receive is a node, a configEpoch of the * node, and the set of slots claimed under this configEpoch. * @@ -2266,27 +2269,27 @@ void clusterSetNodeAsMaster(clusterNode *n) { * case we receive the info via an UPDATE packet. */ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoch, unsigned char *slots) { int j; - clusterNode *curmaster = NULL, *newmaster = NULL; + clusterNode *cur_primary = NULL, *new_primary = NULL; /* The dirty slots list is a list of slots for which we lose the ownership * while having still keys inside. This usually happens after a failover * or after a manual cluster reconfiguration operated by the admin. * - * If the update message is not able to demote a master to slave (in this - * case we'll resync with the master updating the whole key space), we + * If the update message is not able to demote a primary to replica (in this + * case we'll resync with the primary updating the whole key space), we * need to delete all the keys in the slots we lost ownership. */ uint16_t dirty_slots[CLUSTER_SLOTS]; int dirty_slots_count = 0; - /* We should detect if sender is new master of our shard. + /* We should detect if sender is new primary of our shard. * We will know it if all our slots were migrated to sender, and sender * has no slots except ours */ int sender_slots = 0; int migrated_our_slots = 0; - /* Here we set curmaster to this node or the node this node - * replicates to if it's a slave. In the for loop we are - * interested to check if slots are taken away from curmaster. */ - curmaster = clusterNodeIsMaster(myself) ? myself : myself->slaveof; + /* Here we set cur_primary to this node or the node this node + * replicates to if it's a replica. In the for loop we are + * interested to check if slots are taken away from cur_primary. */ + cur_primary = clusterNodeIsPrimary(myself) ? myself : myself->replicaof; if (sender == myself) { serverLog(LL_NOTICE, "Discarding UPDATE message about myself."); @@ -2323,8 +2326,8 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc dirty_slots_count++; } - if (server.cluster->slots[j] == curmaster) { - newmaster = sender; + if (server.cluster->slots[j] == cur_primary) { + new_primary = sender; migrated_our_slots++; } @@ -2390,7 +2393,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * sender if it has just taken over the primary role. 
*/ if (server.cluster->migrating_slots_to[j] != NULL && server.cluster->migrating_slots_to[j] != sender && (server.cluster->migrating_slots_to[j]->configEpoch < senderConfigEpoch || - nodeIsSlave(server.cluster->migrating_slots_to[j])) && + nodeIsReplica(server.cluster->migrating_slots_to[j])) && areInSameShard(server.cluster->migrating_slots_to[j], sender)) { serverLog(LL_NOTICE, "Failover occurred in migration target." @@ -2416,7 +2419,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * 1. Remove the importing state for the specific slot. * 2. Finalize the slot's ownership, if I am not already the owner of * the slot. */ - if (nodeIsMaster(myself) && server.cluster->importing_slots_from[j] == sender) { + if (nodeIsPrimary(myself) && server.cluster->importing_slots_from[j] == sender) { serverLog(LL_NOTICE, "Slot %d is no longer being imported from node %.40s (%s) in shard %.40s;" " Clear my importing source for the slot.", @@ -2451,13 +2454,13 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * keys redirections. */ if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return; - /* Handle a special case where newmaster is not set but both sender + /* Handle a special case where new_primary is not set but both sender * and myself own no slots and in the same shard. Set the sender as * the new primary if my current config epoch is lower than the * sender's. */ - if (!newmaster && myself->slaveof != sender && sender_slots == 0 && myself->numslots == 0 && + if (!new_primary && myself->replicaof != sender && sender_slots == 0 && myself->numslots == 0 && nodeEpoch(myself) < senderConfigEpoch && areInSameShard(sender, myself)) { - newmaster = sender; + new_primary = sender; } /* If the shard to which this node (myself) belongs loses all of @@ -2479,7 +2482,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * shard and our primary just had its last slot migrated to the * sender. In this case we don't reconfigure ourselves as a replica * of the sender. */ - if (newmaster && curmaster->numslots == 0) { + if (new_primary && cur_primary->numslots == 0) { if (server.cluster_allow_replica_migration || areInSameShard(sender, myself)) { serverLog(LL_NOTICE, "Configuration change detected. Reconfiguring myself " @@ -2487,7 +2490,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc sender->name, sender->human_nodename, sender->shard_id); /* Don't clear the migrating/importing states if this is a replica that * just gets promoted to the new primary in the shard. */ - clusterSetMaster(sender, !areInSameShard(sender, myself)); + clusterSetPrimary(sender, !areInSameShard(sender, myself)); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } else if ((sender_slots >= migrated_our_slots) && !areInSameShard(sender, myself)) { /* When all our slots are lost to the sender and the sender belongs to @@ -2495,14 +2498,14 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * migration. Don't reconfigure this node to migrate to the new shard * in this case. */ serverLog(LL_NOTICE, - "My last slot was migrated to node %.40s (%s) in shard %.40s. I am now an empty master.", + "My last slot was migrated to node %.40s (%s) in shard %.40s. 
I am now an empty primary.", sender->name, sender->human_nodename, sender->shard_id); } } else if (dirty_slots_count) { /* If we are here, we received an update message which removed * ownership for certain slots we still have keys about, but still - * we are serving some slots, so this master node was not demoted to - * a slave. + * we are serving some slots, so this primary node was not demoted to + * a replica. * * In order to maintain a consistent state between keys and slots * we need to remove all the keys from the slots we lost. */ @@ -2576,10 +2579,6 @@ void *preparePingExt(clusterMsgPingExt *ext, uint16_t type, uint32_t length) { return &ext->ext[0]; } -clusterMsgPingExt *nextPingExt(clusterMsgPingExt *ext) { - return (clusterMsgPingExt *)((char *)ext + ntohl(ext->length)); -} - /* 1. If a NULL hdr is provided, compute the extension size; * 2. If a non-NULL hdr is provided, write the hostname ping * extension at the start of the cursor. This function @@ -2604,7 +2603,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { memcpy(ext->hostname, myself->hostname, sdslen(myself->hostname)); /* Move the write cursor */ - cursor = nextPingExt(cursor); + cursor = getNextPingExt(cursor); } totlen += getHostnamePingExtSize(); @@ -2619,7 +2618,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { memcpy(ext->human_nodename, myself->human_nodename, sdslen(myself->human_nodename)); /* Move the write cursor */ - cursor = nextPingExt(cursor); + cursor = getNextPingExt(cursor); } totlen += getHumanNodenamePingExtSize(); @@ -2641,7 +2640,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { ext->ttl = htonu64(ttl); /* Move the write cursor */ - cursor = nextPingExt(cursor); + cursor = getNextPingExt(cursor); } totlen += getForgottenNodeExtSize(); extensions++; @@ -2655,7 +2654,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { memcpy(ext->shard_id, myself->shard_id, CLUSTER_NAMELEN); /* Move the write cursor */ - cursor = nextPingExt(cursor); + cursor = getNextPingExt(cursor); } totlen += getShardIdPingExtSize(); extensions++; @@ -2690,7 +2689,7 @@ void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) { } else if (type == CLUSTERMSG_EXT_TYPE_FORGOTTEN_NODE) { clusterMsgPingExtForgottenNode *forgotten_node_ext = &(ext->ext[0].forgotten_node); clusterNode *n = clusterLookupNode(forgotten_node_ext->name, CLUSTER_NAMELEN); - if (n && n != myself && !(nodeIsSlave(myself) && myself->slaveof == n)) { + if (n && n != myself && !(nodeIsReplica(myself) && myself->replicaof == n)) { sds id = sdsnewlen(forgotten_node_ext->name, CLUSTER_NAMELEN); dictEntry *de = dictAddOrFind(server.cluster->nodes_black_list, id); uint64_t expire = server.unixtime + ntohu64(forgotten_node_ext->ttl); @@ -2722,9 +2721,9 @@ void clusterProcessPingExtensions(clusterMsg *hdr, clusterLink *link) { * As the cluster progressively upgrades to version 7.2, we can expect the shard_ids * across all nodes to naturally converge and align. * - * If sender is a replica, set the shard_id to the shard_id of its master. + * If sender is a replica, set the shard_id to the shard_id of its primary. * Otherwise, we'll set it now. 
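To make the reconfiguration rule in clusterUpdateSlotsConfigWith() above more concrete, here is a deliberately condensed, hypothetical sketch of the "follow the sender" decision. It ignores the config-epoch comparison, the same-shard test and cluster-allow-replica-migration, and only captures the idea that we adopt the sender as primary once our own shard has nothing left to serve; the arrays are stand-ins for server.cluster->slots[] and the sender's claimed-slot bitmap:

#define SKETCH_SLOTS 16384

/* Sketch only: should this node start replicating from the sender?
 * slot_owner[j]        - node id we currently believe serves slot j
 * claimed_by_sender[j] - nonzero if the sender claims slot j with a newer epoch
 * my_primary           - node id of our primary (ourselves if we are one) */
static int should_follow_sender(const int *slot_owner, const unsigned char *claimed_by_sender, int my_primary) {
    int migrated_to_sender = 0, still_ours = 0;
    for (int j = 0; j < SKETCH_SLOTS; j++) {
        if (slot_owner[j] != my_primary) continue;
        if (claimed_by_sender[j])
            migrated_to_sender++; /* a slot of ours moves under the sender */
        else
            still_ours++; /* a slot of ours that the sender does not claim */
    }
    /* Reconfigure only when our shard serves nothing and the sender took something. */
    return migrated_to_sender > 0 && still_ours == 0;
}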
*/ - if (ext_shardid == NULL) ext_shardid = clusterNodeGetMaster(sender)->shard_id; + if (ext_shardid == NULL) ext_shardid = clusterNodeGetPrimary(sender)->shard_id; updateShardId(sender, ext_shardid); } @@ -2871,7 +2870,7 @@ int clusterProcessPacket(clusterLink *link) { senderConfigEpoch = ntohu64(hdr->configEpoch); if (senderCurrentEpoch > server.cluster->currentEpoch) server.cluster->currentEpoch = senderCurrentEpoch; /* Update the sender configEpoch if it is a primary publishing a newer one. */ - if (!memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof)) && + if (!memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof)) && senderConfigEpoch > sender->configEpoch) { sender->configEpoch = senderConfigEpoch; clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_FSYNC_CONFIG); @@ -2879,16 +2878,16 @@ int clusterProcessPacket(clusterLink *link) { /* Update the replication offset info for this node. */ sender->repl_offset = ntohu64(hdr->offset); sender->repl_offset_time = now; - /* If we are a slave performing a manual failover and our master + /* If we are a replica performing a manual failover and our primary * sent its offset while already paused, populate the MF state. */ - if (server.cluster->mf_end && nodeIsSlave(myself) && myself->slaveof == sender && - hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED && server.cluster->mf_master_offset == -1) { - server.cluster->mf_master_offset = sender->repl_offset; + if (server.cluster->mf_end && nodeIsReplica(myself) && myself->replicaof == sender && + hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED && server.cluster->mf_primary_offset == -1) { + server.cluster->mf_primary_offset = sender->repl_offset; clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_MANUALFAILOVER); serverLog(LL_NOTICE, "Received replication offset for paused " - "master manual failover: %lld", - server.cluster->mf_master_offset); + "primary manual failover: %lld", + server.cluster->mf_primary_offset); } } @@ -2917,7 +2916,7 @@ int clusterProcessPacket(clusterLink *link) { /* Add this node if it is new for us and the msg type is MEET. * In this stage we don't try to add the node with the right - * flags, slaveof pointer, and so forth, as this details will be + * flags, replicaof pointer, and so forth, as this details will be * resolved when we'll receive PONGs from the node. */ if (!sender && type == CLUSTERMSG_TYPE_MEET) { clusterNode *node; @@ -2966,7 +2965,7 @@ int clusterProcessPacket(clusterLink *link) { clusterRenameNode(link->node, hdr->sender); serverLog(LL_DEBUG, "Handshake with node %.40s completed.", link->node->name); link->node->flags &= ~CLUSTER_NODE_HANDSHAKE; - link->node->flags |= flags & (CLUSTER_NODE_MASTER | CLUSTER_NODE_SLAVE); + link->node->flags |= flags & (CLUSTER_NODE_PRIMARY | CLUSTER_NODE_REPLICA); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); } else if (memcmp(link->node->name, hdr->sender, CLUSTER_NAMELEN) != 0) { /* If the reply has a non matching node ID we @@ -2991,8 +2990,8 @@ int clusterProcessPacket(clusterLink *link) { /* Copy the CLUSTER_NODE_NOFAILOVER flag from what the sender * announced. This is a dynamic flag that we receive from the * sender, and the latest status must be trusted. We need it to - * be propagated because the slave ranking used to understand the - * delay of each slave in the voting process, needs to know + * be propagated because the replica ranking used to understand the + * delay of each replica in the voting process, needs to know * what are the instances really competing. 
*/ if (sender) { int nofailover = flags & CLUSTER_NODE_NOFAILOVER; @@ -3025,23 +3024,23 @@ int clusterProcessPacket(clusterLink *link) { } } - /* Check for role switch: slave -> master or master -> slave. */ + /* Check for role switch: replica -> primary or primary -> replica. */ if (sender) { serverLog(LL_DEBUG, "node %.40s (%s) announces that it is a %s in shard %.40s", sender->name, sender->human_nodename, - !memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof)) ? "master" : "slave", + !memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof)) ? "primary" : "replica", sender->shard_id); - if (!memcmp(hdr->slaveof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->slaveof))) { - /* Node is a master. */ - clusterSetNodeAsMaster(sender); + if (!memcmp(hdr->replicaof, CLUSTER_NODE_NULL_NAME, sizeof(hdr->replicaof))) { + /* Node is a primary. */ + clusterSetNodeAsPrimary(sender); } else { - /* Node is a slave. */ - clusterNode *master = clusterLookupNode(hdr->slaveof, CLUSTER_NAMELEN); + /* Node is a replica. */ + clusterNode *primary = clusterLookupNode(hdr->replicaof, CLUSTER_NAMELEN); - if (clusterNodeIsMaster(sender)) { - /* Master turned into a slave! Reconfigure the node. */ - if (master && areInSameShard(master, sender)) { - /* `sender` was a primary and was in the same shard as `master`, its new primary */ + if (clusterNodeIsPrimary(sender)) { + /* Primary turned into a replica! Reconfigure the node. */ + if (primary && areInSameShard(primary, sender)) { + /* `sender` was a primary and was in the same shard as its new primary */ if (sender->configEpoch > senderConfigEpoch) { serverLog(LL_NOTICE, "Ignore stale message from %.40s (%s) in shard %.40s;" @@ -3049,48 +3048,48 @@ int clusterProcessPacket(clusterLink *link) { sender->name, sender->human_nodename, sender->shard_id, (unsigned long long)senderConfigEpoch, (unsigned long long)sender->configEpoch); } else { - /* `master` is still a `slave` in this observer node's view; update its role and configEpoch - */ - clusterSetNodeAsMaster(master); - master->configEpoch = senderConfigEpoch; + /* `primary` is still a `replica` in this observer node's view; + * update its role and configEpoch */ + clusterSetNodeAsPrimary(primary); + primary->configEpoch = senderConfigEpoch; serverLog(LL_NOTICE, "A failover occurred in shard %.40s; node %.40s (%s)" " failed over to node %.40s (%s) with a config epoch of %llu", - sender->shard_id, sender->name, sender->human_nodename, master->name, - master->human_nodename, (unsigned long long)master->configEpoch); + sender->shard_id, sender->name, sender->human_nodename, primary->name, + primary->human_nodename, (unsigned long long)primary->configEpoch); } } else { /* `sender` was moved to another shard and has become a replica, remove its slot assignment */ int slots = clusterDelNodeSlots(sender); serverLog(LL_NOTICE, - "Node %.40s (%s) is no longer master of shard %.40s;" + "Node %.40s (%s) is no longer primary of shard %.40s;" " removed all %d slot(s) it used to own", sender->name, sender->human_nodename, sender->shard_id, slots); - if (master != NULL) { + if (primary != NULL) { serverLog(LL_NOTICE, "Node %.40s (%s) is now part of shard %.40s", sender->name, - sender->human_nodename, master->shard_id); + sender->human_nodename, primary->shard_id); } } - sender->flags &= ~(CLUSTER_NODE_MASTER | CLUSTER_NODE_MIGRATE_TO); - sender->flags |= CLUSTER_NODE_SLAVE; + sender->flags &= ~(CLUSTER_NODE_PRIMARY | CLUSTER_NODE_MIGRATE_TO); + sender->flags |= CLUSTER_NODE_REPLICA; /* Update config and state. 
*/ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE); } - /* Master node changed for this slave? */ - if (master && sender->slaveof != master) { - if (sender->slaveof) clusterNodeRemoveSlave(sender->slaveof, sender); + /* Primary node changed for this replica? */ + if (primary && sender->replicaof != primary) { + if (sender->replicaof) clusterNodeRemoveReplica(sender->replicaof, sender); serverLog(LL_NOTICE, "Node %.40s (%s) is now a replica of node %.40s (%s) in shard %.40s", - sender->name, sender->human_nodename, master->name, master->human_nodename, + sender->name, sender->human_nodename, primary->name, primary->human_nodename, sender->shard_id); - clusterNodeAddSlave(master, sender); - sender->slaveof = master; + clusterNodeAddReplica(primary, sender); + sender->replicaof = primary; /* Update the shard_id when a replica is connected to its * primary in the very first time. */ - updateShardId(sender, master->shard_id); + updateShardId(sender, primary->shard_id); /* Update config. */ clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); @@ -3100,32 +3099,33 @@ int clusterProcessPacket(clusterLink *link) { /* Update our info about served slots. * - * Note: this MUST happen after we update the master/slave state - * so that CLUSTER_NODE_MASTER flag will be set. */ + * Note: this MUST happen after we update the primary/replica state + * so that CLUSTER_NODE_PRIMARY flag will be set. */ /* Many checks are only needed if the set of served slots this * instance claims is different compared to the set of slots we have * for it. Check this ASAP to avoid other computational expansive * checks later. */ - clusterNode *sender_master = NULL; /* Sender or its master if slave. */ - int dirty_slots = 0; /* Sender claimed slots don't match my view? */ + clusterNode *sender_primary = NULL; /* Sender or its primary if replica. */ + int dirty_slots = 0; /* Sender claimed slots don't match my view? */ if (sender) { - sender_master = clusterNodeIsMaster(sender) ? sender : sender->slaveof; - if (sender_master) { - dirty_slots = memcmp(sender_master->slots, hdr->myslots, sizeof(hdr->myslots)) != 0; + sender_primary = clusterNodeIsPrimary(sender) ? sender : sender->replicaof; + if (sender_primary) { + dirty_slots = memcmp(sender_primary->slots, hdr->myslots, sizeof(hdr->myslots)) != 0; /* Force dirty when the sending shard owns no slots so that * we have a chance to examine and repair slot migrating/importing * states that involve empty shards. */ - dirty_slots |= sender_master->numslots == 0; + dirty_slots |= sender_primary->numslots == 0; } } - /* 1) If the sender of the message is a master, and we detected that + /* 1) If the sender of the message is a primary, and we detected that * the set of slots it claims changed, scan the slots to see if we * need to update our configuration. */ - if (sender_master && dirty_slots) clusterUpdateSlotsConfigWith(sender_master, senderConfigEpoch, hdr->myslots); + if (sender_primary && dirty_slots) + clusterUpdateSlotsConfigWith(sender_primary, senderConfigEpoch, hdr->myslots); /* Explicitly check for a replication loop before attempting the replication * chain folding logic. @@ -3156,38 +3156,38 @@ int clusterProcessPacket(clusterLink *link) { * epoch than B has on slot 1. This leads to B sending an UPDATE to * A directly saying A* is the new owner of slot 1 with a higher epoch. * d. A receives the UPDATE from B and executes clusterUpdateSlotsConfigWith. 
- * A now realizes that it is a replica of A* hence setting myself->slaveof + * A now realizes that it is a replica of A* hence setting myself->replicaof * to A*. * e. Finally, the pre-failover PING message queued up in A*'s outgoing * buffer to A is delivered and processed, out of order though, to A. * f. This stale PING message creates the replication loop */ - if (myself->slaveof && myself->slaveof->slaveof && myself->slaveof->slaveof != myself) { - /* Safeguard against sub-replicas. A replica's master can turn itself + if (myself->replicaof && myself->replicaof->replicaof && myself->replicaof->replicaof != myself) { + /* Safeguard against sub-replicas. A replica's primary can turn itself * into a replica if its last slot is removed. If no other node takes * over the slot, there is nothing else to trigger replica migration. */ serverLog(LL_NOTICE, "I'm a sub-replica! Reconfiguring myself as a replica of %.40s from %.40s", - myself->slaveof->slaveof->name, myself->slaveof->name); - clusterSetMaster(myself->slaveof->slaveof, 1); + myself->replicaof->replicaof->name, myself->replicaof->name); + clusterSetPrimary(myself->replicaof->replicaof, 1); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } /* 2) We also check for the reverse condition, that is, the sender - * claims to serve slots we know are served by a master with a + * claims to serve slots we know are served by a primary with a * greater configEpoch. If this happens we inform the sender. * * This is useful because sometimes after a partition heals, a - * reappearing master may be the last one to claim a given set of + * reappearing primary may be the last one to claim a given set of * hash slots, but with a configuration that other instances know to * be deprecated. Example: * - * A and B are master and slave for slots 1,2,3. + * A and B are primary and replica for slots 1,2,3. * A is partitioned away, B gets promoted. * B is partitioned away, and A returns available. * * Usually B would PING A publishing its set of served slots and its * configEpoch, but because of the partition B can't inform A of the * new configuration, so other nodes that have an updated table must - * do it. In this way A will stop to act as a master (or can try to + * do it. In this way A will stop to act as a primary (or can try to * failover if there are the conditions to win the election). */ if (sender && dirty_slots) { int j; @@ -3213,7 +3213,7 @@ int clusterProcessPacket(clusterLink *link) { /* If our config epoch collides with the sender's try to fix * the problem. */ - if (sender && clusterNodeIsMaster(myself) && clusterNodeIsMaster(sender) && + if (sender && clusterNodeIsPrimary(myself) && clusterNodeIsPrimary(sender) && senderConfigEpoch == myself->configEpoch) { clusterHandleConfigEpochCollision(sender); } @@ -3263,10 +3263,10 @@ int clusterProcessPacket(clusterLink *link) { clusterSendFailoverAuthIfNeeded(sender, hdr); } else if (type == CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK) { if (!sender) return 1; /* We don't know that node. */ - /* We consider this vote only if the sender is a master serving + /* We consider this vote only if the sender is a primary serving * a non zero number of slots, and its currentEpoch is greater or * equal to epoch where this node started the election. 
*/ - if (clusterNodeIsMaster(sender) && sender->numslots > 0 && + if (clusterNodeIsPrimary(sender) && sender->numslots > 0 && senderCurrentEpoch >= server.cluster->failover_auth_epoch) { server.cluster->failover_auth_count++; /* Maybe we reached a quorum here, set a flag to make sure @@ -3274,20 +3274,20 @@ int clusterProcessPacket(clusterLink *link) { clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); } } else if (type == CLUSTERMSG_TYPE_MFSTART) { - /* This message is acceptable only if I'm a master and the sender - * is one of my slaves. */ - if (!sender || sender->slaveof != myself) return 1; - /* Manual failover requested from slaves. Initialize the state + /* This message is acceptable only if I'm a primary and the sender + * is one of my replicas. */ + if (!sender || sender->replicaof != myself) return 1; + /* Manual failover requested from replicas. Initialize the state * accordingly. */ resetManualFailover(); server.cluster->mf_end = now + CLUSTER_MF_TIMEOUT; - server.cluster->mf_slave = sender; + server.cluster->mf_replica = sender; pauseActions(PAUSE_DURING_FAILOVER, now + (CLUSTER_MF_TIMEOUT * CLUSTER_MF_PAUSE_MULT), PAUSE_ACTIONS_CLIENT_WRITE_SET); serverLog(LL_NOTICE, "Manual failover requested by replica %.40s (%s).", sender->name, sender->human_nodename); /* We need to send a ping message to the replica, as it would carry - * `server.cluster->mf_master_offset`, which means the master paused clients - * at offset `server.cluster->mf_master_offset`, so that the replica would + * `server.cluster->mf_primary_offset`, which means the primary paused clients + * at offset `server.cluster->mf_primary_offset`, so that the replica would * know that it is safe to set its `server.cluster->mf_can_start` to 1 so as * to complete failover as quickly as possible. */ clusterSendPing(link, CLUSTERMSG_TYPE_PING); @@ -3300,8 +3300,8 @@ int clusterProcessPacket(clusterLink *link) { if (!n) return 1; /* We don't know the reported node. */ if (n->configEpoch >= reportedConfigEpoch) return 1; /* Nothing new. */ - /* If in our current config the node is a slave, set it as a master. */ - if (nodeIsSlave(n)) clusterSetNodeAsMaster(n); + /* If in our current config the node is a replica, set it as a primary. */ + if (nodeIsReplica(n)) clusterSetNodeAsPrimary(n); /* Update the node's configEpoch. */ n->configEpoch = reportedConfigEpoch; @@ -3551,13 +3551,13 @@ void clusterBroadcastMessage(clusterMsgSendBlock *msgblock) { * sizeof(clusterMsg) in bytes. */ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { uint64_t offset; - clusterNode *master; + clusterNode *primary; - /* If this node is a master, we send its slots bitmap and configEpoch. - * If this node is a slave we send the master's information instead (the - * node is flagged as slave so the receiver knows that it is NOT really + /* If this node is a primary, we send its slots bitmap and configEpoch. + * If this node is a replica we send the primary's information instead (the + * node is flagged as replica so the receiver knows that it is NOT really * in charge for this slots. */ - master = (nodeIsSlave(myself) && myself->slaveof) ? myself->slaveof : myself; + primary = (nodeIsReplica(myself) && myself->replicaof) ? 
myself->replicaof : myself; hdr->ver = htons(CLUSTER_PROTO_VER); hdr->sig[0] = 'R'; @@ -3579,9 +3579,9 @@ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { int announced_tcp_port, announced_tls_port, announced_cport; deriveAnnouncedPorts(&announced_tcp_port, &announced_tls_port, &announced_cport); - memcpy(hdr->myslots, master->slots, sizeof(hdr->myslots)); - memset(hdr->slaveof, 0, CLUSTER_NAMELEN); - if (myself->slaveof != NULL) memcpy(hdr->slaveof, myself->slaveof->name, CLUSTER_NAMELEN); + memcpy(hdr->myslots, primary->slots, sizeof(hdr->myslots)); + memset(hdr->replicaof, 0, CLUSTER_NAMELEN); + if (myself->replicaof != NULL) memcpy(hdr->replicaof, myself->replicaof->name, CLUSTER_NAMELEN); if (server.tls_cluster) { hdr->port = htons(announced_tls_port); hdr->pport = htons(announced_tcp_port); @@ -3595,17 +3595,17 @@ static void clusterBuildMessageHdr(clusterMsg *hdr, int type, size_t msglen) { /* Set the currentEpoch and configEpochs. */ hdr->currentEpoch = htonu64(server.cluster->currentEpoch); - hdr->configEpoch = htonu64(master->configEpoch); + hdr->configEpoch = htonu64(primary->configEpoch); /* Set the replication offset. */ - if (nodeIsSlave(myself)) - offset = replicationGetSlaveOffset(); + if (nodeIsReplica(myself)) + offset = replicationGetReplicaOffset(); else - offset = server.master_repl_offset; + offset = server.primary_repl_offset; hdr->offset = htonu64(offset); /* Set the message flags. */ - if (clusterNodeIsMaster(myself) && server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_PAUSED; + if (clusterNodeIsPrimary(myself) && server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_PAUSED; hdr->totlen = htonl(msglen); } @@ -3648,7 +3648,7 @@ void clusterSendPing(clusterLink *link, int type) { /* How many gossip sections we want to add? 1/10 of the number of nodes * and anyway at least 3. Why 1/10? * - * If we have N masters, with N/10 entries, and we consider that in + * If we have N primaries, with N/10 entries, and we consider that in * node_timeout we exchange with each other node at least 4 packets * (we ping in the worst case in node_timeout/2 time, and we also * receive two pings from the host), we have a total of 8 packets @@ -3661,14 +3661,14 @@ void clusterSendPing(clusterLink *link, int type) { * PROB = probability of being featured in a single gossip entry, * which is 1 / NUM_OF_NODES. * ENTRIES = 10. - * TOTAL_PACKETS = 2 * 4 * NUM_OF_MASTERS. + * TOTAL_PACKETS = 2 * 4 * NUM_OF_PRIMARIES. * - * If we assume we have just masters (so num of nodes and num of masters + * If we assume we have just primaries (so num of nodes and num of primaries * is the same), with 1/10 we always get over the majority, and specifically - * 80% of the number of nodes, to account for many masters failing at the + * 80% of the number of nodes, to account for many primaries failing at the * same time. * - * Since we have non-voting slaves that lower the probability of an entry + * Since we have non-voting replicas that lower the probability of an entry * to feature our node, we set the number of entries per packet as * 10% of the total nodes we have. */ wanted = floor(dictSize(server.cluster->nodes) / 10); @@ -3777,16 +3777,16 @@ void clusterSendPing(clusterLink *link, int type) { * In Cluster mode, pongs are not used just for failure detection, but also * to carry important configuration information. 
So broadcasting a pong is * useful when something changes in the configuration and we want to make - * the cluster aware ASAP (for instance after a slave promotion). + * the cluster aware ASAP (for instance after a replica promotion). * * The 'target' argument specifies the receiving instances using the * defines below: * * CLUSTER_BROADCAST_ALL -> All known instances. - * CLUSTER_BROADCAST_LOCAL_SLAVES -> All slaves in my master-slaves ring. + * CLUSTER_BROADCAST_LOCAL_REPLICAS -> All replicas in my primary-replicas ring. */ #define CLUSTER_BROADCAST_ALL 0 -#define CLUSTER_BROADCAST_LOCAL_SLAVES 1 +#define CLUSTER_BROADCAST_LOCAL_REPLICAS 1 void clusterBroadcastPong(int target) { dictIterator *di; dictEntry *de; @@ -3797,10 +3797,10 @@ void clusterBroadcastPong(int target) { if (!node->link) continue; if (node == myself || nodeInHandshake(node)) continue; - if (target == CLUSTER_BROADCAST_LOCAL_SLAVES) { - int local_slave = - nodeIsSlave(node) && node->slaveof && (node->slaveof == myself || node->slaveof == myself->slaveof); - if (!local_slave) continue; + if (target == CLUSTER_BROADCAST_LOCAL_REPLICAS) { + int local_replica = nodeIsReplica(node) && node->replicaof && + (node->replicaof == myself || node->replicaof == myself->replicaof); + if (!local_replica) continue; } clusterSendPing(node->link, CLUSTERMSG_TYPE_PONG); } @@ -3956,15 +3956,15 @@ void clusterPropagatePublish(robj *channel, robj *message, int sharded) { } /* ----------------------------------------------------------------------------- - * SLAVE node specific functions + * REPLICA node specific functions * -------------------------------------------------------------------------- */ /* This function sends a FAILOVER_AUTH_REQUEST message to every node in order to - * see if there is the quorum for this slave instance to failover its failing - * master. + * see if there is the quorum for this replica instance to failover its failing + * primary. * - * Note that we send the failover request to everybody, master and slave nodes, - * but only the masters are supposed to reply to our query. */ + * Note that we send the failover request to everybody, primary and replica nodes, + * but only the primaries are supposed to reply to our query. */ void clusterRequestFailoverAuth(void) { uint32_t msglen = sizeof(clusterMsg) - sizeof(union clusterMsgData); clusterMsgSendBlock *msgblock = createClusterMsgSendBlock(CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST, msglen); @@ -3972,7 +3972,7 @@ void clusterRequestFailoverAuth(void) { clusterMsg *hdr = &msgblock->msg; /* If this is a manual failover, set the CLUSTERMSG_FLAG0_FORCEACK bit * in the header to communicate the nodes receiving the message that - * they should authorized the failover even if the master is working. */ + * they should authorized the failover even if the primary is working. */ if (server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_FORCEACK; clusterBroadcastMessage(msgblock); clusterMsgSendBlockDecrRefCount(msgblock); @@ -4002,18 +4002,18 @@ void clusterSendMFStart(clusterNode *node) { /* Vote for the node asking for our vote if there are the conditions. 
 */
 void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) {
-    clusterNode *master = node->slaveof;
+    clusterNode *primary = node->replicaof;
     uint64_t requestCurrentEpoch = ntohu64(request->currentEpoch);
     uint64_t requestConfigEpoch = ntohu64(request->configEpoch);
     unsigned char *claimed_slots = request->myslots;
     int force_ack = request->mflags[0] & CLUSTERMSG_FLAG0_FORCEACK;
     int j;
 
-    /* IF we are not a master serving at least 1 slot, we don't have the
+    /* IF we are not a primary serving at least 1 slot, we don't have the
      * right to vote, as the cluster size is the number
-     * of masters serving at least one slot, and quorum is the cluster
+     * of primaries serving at least one slot, and quorum is the cluster
      * size + 1 */
-    if (nodeIsSlave(myself) || myself->numslots == 0) return;
+    if (nodeIsReplica(myself) || myself->numslots == 0) return;
 
     /* Request epoch must be >= our currentEpoch.
      * Note that it is impossible for it to actually be greater since
@@ -4033,37 +4033,37 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) {
         return;
     }
 
-    /* Node must be a slave and its master down.
-     * The master can be non failing if the request is flagged
+    /* Node must be a replica and its primary down.
+     * The primary can be non failing if the request is flagged
      * with CLUSTERMSG_FLAG0_FORCEACK (manual failover). */
-    if (clusterNodeIsMaster(node) || master == NULL || (!nodeFailed(master) && !force_ack)) {
-        if (clusterNodeIsMaster(node)) {
-            serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): it is a master node", node->name,
+    if (clusterNodeIsPrimary(node) || primary == NULL || (!nodeFailed(primary) && !force_ack)) {
+        if (clusterNodeIsPrimary(node)) {
+            serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): it is a primary node", node->name,
                       node->human_nodename);
-        } else if (master == NULL) {
-            serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): I don't know its master", node->name,
+        } else if (primary == NULL) {
+            serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): I don't know its primary", node->name,
                       node->human_nodename);
-        } else if (!nodeFailed(master)) {
-            serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): its master is up", node->name,
+        } else if (!nodeFailed(primary)) {
+            serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): its primary is up", node->name,
                       node->human_nodename);
         }
         return;
     }
 
-    /* We did not voted for a slave about this master for two
+    /* We did not vote for a replica of this primary within two
      * times the node timeout. This is not strictly needed for correctness
      * of the algorithm but makes the base case more linear.
 */
-    if (mstime() - node->slaveof->voted_time < server.cluster_node_timeout * 2) {
+    if (mstime() - node->replicaof->voted_time < server.cluster_node_timeout * 2) {
         serverLog(LL_WARNING,
                   "Failover auth denied to %.40s %s: "
-                  "can't vote about this master before %lld milliseconds",
+                  "can't vote about this primary before %lld milliseconds",
                   node->name, node->human_nodename,
-                  (long long)((server.cluster_node_timeout * 2) - (mstime() - node->slaveof->voted_time)));
+                  (long long)((server.cluster_node_timeout * 2) - (mstime() - node->replicaof->voted_time)));
         return;
     }
 
-    /* The slave requesting the vote must have a configEpoch for the claimed
-     * slots that is >= the one of the masters currently serving the same
+    /* The replica requesting the vote must have a configEpoch for the claimed
+     * slots that is >= the one of the primaries currently serving the same
      * slots in the current configuration. */
     for (j = 0; j < CLUSTER_SLOTS; j++) {
         if (bitmapTestBit(claimed_slots, j) == 0) continue;
@@ -4071,8 +4071,8 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) {
             continue;
         }
         /* If we reached this point we found a slot that in our current slots
-         * is served by a master with a greater configEpoch than the one claimed
-         * by the slave requesting our vote. Refuse to vote for this slave. */
+         * is served by a primary with a greater configEpoch than the one claimed
+         * by the replica requesting our vote. Refuse to vote for this replica. */
         serverLog(LL_WARNING,
                   "Failover auth denied to %.40s (%s): "
                   "slot %d epoch (%llu) > reqEpoch (%llu)",
@@ -4081,46 +4081,46 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) {
         return;
     }
 
-    /* We can vote for this slave. */
+    /* We can vote for this replica. */
     server.cluster->lastVoteEpoch = server.cluster->currentEpoch;
-    node->slaveof->voted_time = mstime();
+    node->replicaof->voted_time = mstime();
     clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_FSYNC_CONFIG);
     clusterSendFailoverAuth(node);
     serverLog(LL_NOTICE, "Failover auth granted to %.40s (%s) for epoch %llu", node->name, node->human_nodename,
               (unsigned long long)server.cluster->currentEpoch);
 }
 
-/* This function returns the "rank" of this instance, a slave, in the context
- * of its master-slaves ring. The rank of the slave is given by the number of
- * other slaves for the same master that have a better replication offset
+/* This function returns the "rank" of this instance, a replica, in the context
+ * of its primary-replicas ring. The rank of the replica is given by the number of
+ * other replicas for the same primary that have a better replication offset
  * compared to the local one (better means, greater, so they claim more data).
 *
- * A slave with rank 0 is the one with the greatest (most up to date)
+ * A replica with rank 0 is the one with the greatest (most up to date)
 * replication offset, and so forth. Note that because how the rank is computed
- * multiple slaves may have the same rank, in case they have the same offset.
+ * multiple replicas may have the same rank, in case they have the same offset.
 *
- * The slave rank is used to add a delay to start an election in order to
- * get voted and replace a failing master. Slaves with better replication
+ * The replica rank is used to add a delay to start an election in order to
+ * get voted and replace a failing primary. Replicas with better replication
 * offsets are more likely to win.
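The ranking rule above can be shown in isolation. A minimal sketch over plain arrays, assuming every sibling replica is eligible (the real clusterGetReplicaRank() additionally skips replicas flagged as unable to fail over):

/* Sketch: rank of replica 'me' among 'num_replicas' siblings, given their
 * replication offsets. Rank 0 means no sibling claims more data than we do. */
static int replica_rank(const long long *offsets, int num_replicas, int me) {
    int rank = 0;
    for (int j = 0; j < num_replicas; j++) {
        if (j == me) continue;
        if (offsets[j] > offsets[me]) rank++; /* a strictly better offset outranks us */
    }
    return rank;
}

/* Example: offsets {100, 250, 250} give replica 0 a rank of 2, while
 * replicas 1 and 2 both get rank 0 (ties share the same rank). */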
*/ -int clusterGetSlaveRank(void) { +int clusterGetReplicaRank(void) { long long myoffset; int j, rank = 0; - clusterNode *master; + clusterNode *primary; - serverAssert(nodeIsSlave(myself)); - master = myself->slaveof; - if (master == NULL) return 0; /* Never called by slaves without master. */ + serverAssert(nodeIsReplica(myself)); + primary = myself->replicaof; + if (primary == NULL) return 0; /* Never called by replicas without primary. */ - myoffset = replicationGetSlaveOffset(); - for (j = 0; j < master->numslaves; j++) - if (master->slaves[j] != myself && !nodeCantFailover(master->slaves[j]) && - master->slaves[j]->repl_offset > myoffset) + myoffset = replicationGetReplicaOffset(); + for (j = 0; j < primary->num_replicas; j++) + if (primary->replicas[j] != myself && !nodeCantFailover(primary->replicas[j]) && + primary->replicas[j]->repl_offset > myoffset) rank++; return rank; } -/* This function is called by clusterHandleSlaveFailover() in order to - * let the slave log why it is not able to failover. Sometimes there are +/* This function is called by clusterHandleReplicaFailover() in order to + * let the replica log why it is not able to failover. Sometimes there are * not the conditions, but since the failover function is called again and * again, we can't log the same things continuously. * @@ -4129,18 +4129,18 @@ int clusterGetSlaveRank(void) { * * 1) The reason for which the failover can't be initiated changed. * The reasons also include a NONE reason we reset the state to - * when the slave finds that its master is fine (no FAIL flag). - * 2) Also, the log is emitted again if the master is still down and + * when the replica finds that its primary is fine (no FAIL flag). + * 2) Also, the log is emitted again if the primary is still down and * the reason for not failing over is still the same, but more than * CLUSTER_CANT_FAILOVER_RELOG_PERIOD seconds elapsed. - * 3) Finally, the function only logs if the slave is down for more than + * 3) Finally, the function only logs if the replica is down for more than * five seconds + NODE_TIMEOUT. This way nothing is logged when a * failover starts in a reasonable time. * - * The function is called with the reason why the slave can't failover + * The function is called with the reason why the replica can't failover * which is one of the integer macros CLUSTER_CANT_FAILOVER_*. * - * The function is guaranteed to be called only if 'myself' is a slave. */ + * The function is guaranteed to be called only if 'myself' is a replica. */ void clusterLogCantFailover(int reason) { char *msg; static time_t lastlog_time = 0; @@ -4153,10 +4153,11 @@ void clusterLogCantFailover(int reason) { server.cluster->cant_failover_reason = reason; - /* We also don't emit any log if the master failed no long ago, the - * goal of this function is to log slaves in a stalled condition for + /* We also don't emit any log if the primary failed no long ago, the + * goal of this function is to log replicas in a stalled condition for * a long time. 
*/ - if (myself->slaveof && nodeFailed(myself->slaveof) && (mstime() - myself->slaveof->fail_time) < nolog_fail_time) + if (myself->replicaof && nodeFailed(myself->replicaof) && + (mstime() - myself->replicaof->fail_time) < nolog_fail_time) return; switch (reason) { @@ -4182,24 +4183,24 @@ void clusterLogCantFailover(int reason) { } /* This function implements the final part of automatic and manual failovers, - * where the slave grabs its master's hash slots, and propagates the new + * where the replica grabs its primary's hash slots, and propagates the new * configuration. * * Note that it's up to the caller to be sure that the node got a new * configuration epoch already. */ -void clusterFailoverReplaceYourMaster(void) { +void clusterFailoverReplaceYourPrimary(void) { int j; - clusterNode *oldmaster = myself->slaveof; + clusterNode *old_primary = myself->replicaof; - if (clusterNodeIsMaster(myself) || oldmaster == NULL) return; + if (clusterNodeIsPrimary(myself) || old_primary == NULL) return; - /* 1) Turn this node into a master. */ - clusterSetNodeAsMaster(myself); - replicationUnsetMaster(); + /* 1) Turn this node into a primary . */ + clusterSetNodeAsPrimary(myself); + replicationUnsetPrimary(); - /* 2) Claim all the slots assigned to our master. */ + /* 2) Claim all the slots assigned to our primary. */ for (j = 0; j < CLUSTER_SLOTS; j++) { - if (clusterNodeCoversSlot(oldmaster, j)) { + if (clusterNodeCoversSlot(old_primary, j)) { clusterDelSlot(j); clusterAddSlot(myself, j); } @@ -4210,22 +4211,22 @@ void clusterFailoverReplaceYourMaster(void) { clusterSaveConfigOrDie(1); /* 4) Pong all the other nodes so that they can update the state - * accordingly and detect that we switched to master role. */ + * accordingly and detect that we switched to primary role. */ clusterBroadcastPong(CLUSTER_BROADCAST_ALL); /* 5) If there was a manual failover in progress, clear the state. */ resetManualFailover(); } -/* This function is called if we are a slave node and our master serving +/* This function is called if we are a replica node and our primary serving * a non-zero amount of hash slots is in FAIL state. * * The goal of this function is: * 1) To check if we are able to perform a failover, is our data updated? - * 2) Try to get elected by masters. + * 2) Try to get elected by primaries. * 3) Perform the failover informing all the other nodes. */ -void clusterHandleSlaveFailover(void) { +void clusterHandleReplicaFailover(void) { mstime_t data_age; mstime_t auth_age = mstime() - server.cluster->failover_auth_time; int needed_quorum = (server.cluster->size / 2) + 1; @@ -4247,12 +4248,13 @@ void clusterHandleSlaveFailover(void) { /* Pre conditions to run the function, that must be met both in case * of an automatic or manual failover: - * 1) We are a slave. - * 2) Our master is flagged as FAIL, or this is a manual failover. + * 1) We are a replica. + * 2) Our primary is flagged as FAIL, or this is a manual failover. * 3) We don't have the no failover configuration set, and this is * not a manual failover. */ - if (clusterNodeIsMaster(myself) || myself->slaveof == NULL || (!nodeFailed(myself->slaveof) && !manual_failover) || - (server.cluster_slave_no_failover && !manual_failover)) { + if (clusterNodeIsPrimary(myself) || myself->replicaof == NULL || + (!nodeFailed(myself->replicaof) && !manual_failover) || + (server.cluster_replica_no_failover && !manual_failover)) { /* There are no reasons to failover, so we set the reason why we * are returning without failing over to NONE. 
*/ server.cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE; @@ -4260,25 +4262,25 @@ void clusterHandleSlaveFailover(void) { } /* Set data_age to the number of milliseconds we are disconnected from - * the master. */ + * the primary. */ if (server.repl_state == REPL_STATE_CONNECTED) { - data_age = (mstime_t)(server.unixtime - server.master->lastinteraction) * 1000; + data_age = (mstime_t)(server.unixtime - server.primary->last_interaction) * 1000; } else { data_age = (mstime_t)(server.unixtime - server.repl_down_since) * 1000; } /* Remove the node timeout from the data age as it is fine that we are - * disconnected from our master at least for the time it was down to be + * disconnected from our primary at least for the time it was down to be * flagged as FAIL, that's the baseline. */ if (data_age > server.cluster_node_timeout) data_age -= server.cluster_node_timeout; - /* Check if our data is recent enough according to the slave validity + /* Check if our data is recent enough according to the replica validity * factor configured by the user. * * Check bypassed for manual failovers. */ - if (server.cluster_slave_validity_factor && - data_age > (((mstime_t)server.repl_ping_slave_period * 1000) + - (server.cluster_node_timeout * server.cluster_slave_validity_factor))) { + if (server.cluster_replica_validity_factor && + data_age > (((mstime_t)server.repl_ping_replica_period * 1000) + + (server.cluster_node_timeout * server.cluster_replica_validity_factor))) { if (!manual_failover) { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_DATA_AGE); return; @@ -4293,9 +4295,9 @@ void clusterHandleSlaveFailover(void) { random() % 500; /* Random delay between 0 and 500 milliseconds. */ server.cluster->failover_auth_count = 0; server.cluster->failover_auth_sent = 0; - server.cluster->failover_auth_rank = clusterGetSlaveRank(); - /* We add another delay that is proportional to the slave rank. - * Specifically 1 second * rank. This way slaves that have a probably + server.cluster->failover_auth_rank = clusterGetReplicaRank(); + /* We add another delay that is proportional to the replica rank. + * Specifically 1 second * rank. This way replicas that have a probably * less updated replication offset, are penalized. */ server.cluster->failover_auth_time += server.cluster->failover_auth_rank * 1000; /* However if this is a manual failover, no delay is needed. */ @@ -4308,21 +4310,21 @@ void clusterHandleSlaveFailover(void) { "Start of election delayed for %lld milliseconds " "(rank #%d, offset %lld).", server.cluster->failover_auth_time - mstime(), server.cluster->failover_auth_rank, - replicationGetSlaveOffset()); + replicationGetReplicaOffset()); /* Now that we have a scheduled election, broadcast our offset - * to all the other slaves so that they'll updated their offsets + * to all the other replicas so that they'll updated their offsets * if our offset is better. */ - clusterBroadcastPong(CLUSTER_BROADCAST_LOCAL_SLAVES); + clusterBroadcastPong(CLUSTER_BROADCAST_LOCAL_REPLICAS); return; } /* It is possible that we received more updated offsets from other - * slaves for the same master since we computed our election delay. + * replicas for the same primary since we computed our election delay. * Update the delay if our rank changed. * * Not performed if this is a manual failover. 
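Putting the delay components described above together (a fixed settle time, up to 500 ms of jitter, plus one second per rank), a small illustrative helper; rand() stands in for the server's random() call and the function itself is a sketch, not the patch's scheduling code:

#include <stdlib.h>

typedef long long sketch_mstime_t;

/* Sketch: earliest time at which this replica may start requesting votes.
 * now    - current time in milliseconds
 * rank   - clusterGetReplicaRank()-style rank, 0 for the most up-to-date replica
 * manual - manual failovers skip the delay entirely */
static sketch_mstime_t election_start_time(sketch_mstime_t now, int rank, int manual) {
    sketch_mstime_t when = now + 500 + (rand() % 500); /* fixed delay plus jitter */
    when += (sketch_mstime_t)rank * 1000;              /* penalize less up-to-date replicas */
    if (manual) when = now;                            /* manual failover: no delay */
    return when;
}

/* Example: an automatic failover with rank 2 starts between 2.5 and 3.0
 * seconds from now, giving better-ranked replicas a head start. */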
*/ if (server.cluster->failover_auth_sent == 0 && server.cluster->mf_end == 0) { - int newrank = clusterGetSlaveRank(); + int newrank = clusterGetReplicaRank(); if (newrank > server.cluster->failover_auth_rank) { long long added_delay = (newrank - server.cluster->failover_auth_rank) * 1000; server.cluster->failover_auth_time += added_delay; @@ -4358,9 +4360,9 @@ void clusterHandleSlaveFailover(void) { /* Check if we reached the quorum. */ if (server.cluster->failover_auth_count >= needed_quorum) { - /* We have the quorum, we can finally failover the master. */ + /* We have the quorum, we can finally failover the primary. */ - serverLog(LL_NOTICE, "Failover election won: I'm the new master."); + serverLog(LL_NOTICE, "Failover election won: I'm the new primary."); /* Update my configEpoch to the epoch of the election. */ if (myself->configEpoch < server.cluster->failover_auth_epoch) { @@ -4370,99 +4372,99 @@ void clusterHandleSlaveFailover(void) { } /* Take responsibility for the cluster slots. */ - clusterFailoverReplaceYourMaster(); + clusterFailoverReplaceYourPrimary(); } else { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_VOTES); } } /* ----------------------------------------------------------------------------- - * CLUSTER slave migration + * CLUSTER replica migration * - * Slave migration is the process that allows a slave of a master that is - * already covered by at least another slave, to "migrate" to a master that - * is orphaned, that is, left with no working slaves. + * Replica migration is the process that allows a replica of a primary that is + * already covered by at least another replica, to "migrate" to a primary that + * is orphaned, that is, left with no working replicas. * ------------------------------------------------------------------------- */ /* This function is responsible to decide if this replica should be migrated - * to a different (orphaned) master. It is called by the clusterCron() function + * to a different (orphaned) primary. It is called by the clusterCron() function * only if: * - * 1) We are a slave node. - * 2) It was detected that there is at least one orphaned master in + * 1) We are a replica node. + * 2) It was detected that there is at least one orphaned primary in * the cluster. - * 3) We are a slave of one of the masters with the greatest number of - * slaves. + * 3) We are a replica of one of the primaries with the greatest number of + * replicas. * * This checks are performed by the caller since it requires to iterate - * the nodes anyway, so we spend time into clusterHandleSlaveMigration() + * the nodes anyway, so we spend time into clusterHandleReplicaMigration() * if definitely needed. * - * The function is called with a pre-computed max_slaves, that is the max - * number of working (not in FAIL state) slaves for a single master. + * The function is called with a pre-computed max_replicas, that is the max + * number of working (not in FAIL state) replicas for a single primary. * * Additional conditions for migration are examined inside the function. */ -void clusterHandleSlaveMigration(int max_slaves) { - int j, okslaves = 0; - clusterNode *mymaster = myself->slaveof, *target = NULL, *candidate = NULL; +void clusterHandleReplicaMigration(int max_replicas) { + int j, ok_replicas = 0; + clusterNode *my_primary = myself->replicaof, *target = NULL, *candidate = NULL; dictIterator *di; dictEntry *de; /* Step 1: Don't migrate if the cluster state is not ok. 
*/ if (server.cluster->state != CLUSTER_OK) return; - /* Step 2: Don't migrate if my master will not be left with at least - * 'migration-barrier' slaves after my migration. */ - if (mymaster == NULL) return; - for (j = 0; j < mymaster->numslaves; j++) - if (!nodeFailed(mymaster->slaves[j]) && !nodeTimedOut(mymaster->slaves[j])) okslaves++; - if (okslaves <= server.cluster_migration_barrier) return; + /* Step 2: Don't migrate if my primary will not be left with at least + * 'migration-barrier' replicas after my migration. */ + if (my_primary == NULL) return; + for (j = 0; j < my_primary->num_replicas; j++) + if (!nodeFailed(my_primary->replicas[j]) && !nodeTimedOut(my_primary->replicas[j])) ok_replicas++; + if (ok_replicas <= server.cluster_migration_barrier) return; /* Step 3: Identify a candidate for migration, and check if among the - * masters with the greatest number of ok slaves, I'm the one with the - * smallest node ID (the "candidate slave"). + * primaries with the greatest number of ok replicas, I'm the one with the + * smallest node ID (the "candidate replica"). * * Note: this means that eventually a replica migration will occur - * since slaves that are reachable again always have their FAIL flag + * since replicas that are reachable again always have their FAIL flag * cleared, so eventually there must be a candidate. * There is a possible race condition causing multiple - * slaves to migrate at the same time, but this is unlikely to + * replicas to migrate at the same time, but this is unlikely to * happen and relatively harmless when it does. */ candidate = myself; di = dictGetSafeIterator(server.cluster->nodes); while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); - int okslaves = 0, is_orphaned = 1; + int ok_replicas = 0, is_orphaned = 1; - /* We want to migrate only if this master is working, orphaned, and - * used to have slaves or if failed over a master that had slaves + /* We want to migrate only if this primary is working, orphaned, and + * used to have replicas or if failed over a primary that had replicas * (MIGRATE_TO flag). This way we only migrate to instances that were * supposed to have replicas. */ - if (nodeIsSlave(node) || nodeFailed(node)) is_orphaned = 0; + if (nodeIsReplica(node) || nodeFailed(node)) is_orphaned = 0; if (!(node->flags & CLUSTER_NODE_MIGRATE_TO)) is_orphaned = 0; - /* Check number of working slaves. */ - if (clusterNodeIsMaster(node)) okslaves = clusterCountNonFailingSlaves(node); - if (okslaves > 0) is_orphaned = 0; + /* Check number of working replicas. */ + if (clusterNodeIsPrimary(node)) ok_replicas = clusterCountNonFailingReplicas(node); + if (ok_replicas > 0) is_orphaned = 0; if (is_orphaned) { if (!target && node->numslots > 0) target = node; /* Track the starting time of the orphaned condition for this - * master. */ + * primary. */ if (!node->orphaned_time) node->orphaned_time = mstime(); } else { node->orphaned_time = 0; } - /* Check if I'm the slave candidate for the migration: attached - * to a master with the maximum number of slaves and with the smallest + /* Check if I'm the replica candidate for the migration: attached + * to a primary with the maximum number of replicas and with the smallest * node ID. 
*/ - if (okslaves == max_slaves) { - for (j = 0; j < node->numslaves; j++) { - if (memcmp(node->slaves[j]->name, candidate->name, CLUSTER_NAMELEN) < 0) { - candidate = node->slaves[j]; + if (ok_replicas == max_replicas) { + for (j = 0; j < node->num_replicas; j++) { + if (memcmp(node->replicas[j]->name, candidate->name, CLUSTER_NAMELEN) < 0) { + candidate = node->replicas[j]; } } } @@ -4470,62 +4472,62 @@ void clusterHandleSlaveMigration(int max_slaves) { dictReleaseIterator(di); /* Step 4: perform the migration if there is a target, and if I'm the - * candidate, but only if the master is continuously orphaned for a + * candidate, but only if the primary is continuously orphaned for a * couple of seconds, so that during failovers, we give some time to - * the natural slaves of this instance to advertise their switch from - * the old master to the new one. */ - if (target && candidate == myself && (mstime() - target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY && + * the natural replicas of this instance to advertise their switch from + * the old primary to the new one. */ + if (target && candidate == myself && (mstime() - target->orphaned_time) > CLUSTER_REPLICA_MIGRATION_DELAY && !(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) { - serverLog(LL_NOTICE, "Migrating to orphaned master %.40s (%s) in shard %.40s", target->name, + serverLog(LL_NOTICE, "Migrating to orphaned primary %.40s (%s) in shard %.40s", target->name, target->human_nodename, target->shard_id); - clusterSetMaster(target, 1); + clusterSetPrimary(target, 1); } } /* ----------------------------------------------------------------------------- * CLUSTER manual failover * - * This are the important steps performed by slaves during a manual failover: + * This are the important steps performed by replicas during a manual failover: * 1) User send CLUSTER FAILOVER command. The failover state is initialized * setting mf_end to the millisecond unix time at which we'll abort the * attempt. - * 2) Slave sends a MFSTART message to the master requesting to pause clients + * 2) Replica sends a MFSTART message to the primary requesting to pause clients * for two times the manual failover timeout CLUSTER_MF_TIMEOUT. - * When master is paused for manual failover, it also starts to flag + * When primary is paused for manual failover, it also starts to flag * packets with CLUSTERMSG_FLAG0_PAUSED. - * 3) Slave waits for master to send its replication offset flagged as PAUSED. - * 4) If slave received the offset from the master, and its offset matches, - * mf_can_start is set to 1, and clusterHandleSlaveFailover() will perform + * 3) Replica waits for primary to send its replication offset flagged as PAUSED. + * 4) If replica received the offset from the primary, and its offset matches, + * mf_can_start is set to 1, and clusterHandleReplicaFailover() will perform * the failover as usually, with the difference that the vote request - * will be modified to force masters to vote for a slave that has a - * working master. + * will be modified to force primaries to vote for a replica that has a + * working primary. * - * From the point of view of the master things are simpler: when a - * PAUSE_CLIENTS packet is received the master sets mf_end as well and - * the sender in mf_slave. During the time limit for the manual failover - * the master will just send PINGs more often to this slave, flagged with - * the PAUSED flag, so that the slave will set mf_master_offset when receiving - * a packet from the master with this flag set. 
+ * From the point of view of the primary things are simpler: when a + * PAUSE_CLIENTS packet is received the primary sets mf_end as well and + * the sender in mf_replica. During the time limit for the manual failover + * the primary will just send PINGs more often to this replica, flagged with + * the PAUSED flag, so that the replica will set mf_primary_offset when receiving + * a packet from the primary with this flag set. * * The goal of the manual failover is to perform a fast failover without - * data loss due to the asynchronous master-slave replication. + * data loss due to the asynchronous primary-replica replication. * -------------------------------------------------------------------------- */ -/* Reset the manual failover state. This works for both masters and slaves +/* Reset the manual failover state. This works for both primaries and replicas * as all the state about manual failover is cleared. * * The function can be used both to initialize the manual failover state at * startup or to abort a manual failover in progress. */ void resetManualFailover(void) { - if (server.cluster->mf_slave) { - /* We were a master failing over, so we paused clients and related actions. + if (server.cluster->mf_replica) { + /* We were a primary failing over, so we paused clients and related actions. * Regardless of the outcome we unpause now to allow traffic again. */ unpauseActions(PAUSE_DURING_FAILOVER); } server.cluster->mf_end = 0; /* No manual failover in progress. */ server.cluster->mf_can_start = 0; - server.cluster->mf_slave = NULL; - server.cluster->mf_master_offset = -1; + server.cluster->mf_replica = NULL; + server.cluster->mf_primary_offset = -1; } /* If a manual failover timed out, abort it. */ @@ -4543,16 +4545,16 @@ void clusterHandleManualFailover(void) { if (server.cluster->mf_end == 0) return; /* If mf_can_start is non-zero, the failover was already triggered so the - * next steps are performed by clusterHandleSlaveFailover(). */ + * next steps are performed by clusterHandleReplicaFailover(). */ if (server.cluster->mf_can_start) return; - if (server.cluster->mf_master_offset == -1) return; /* Wait for offset... */ + if (server.cluster->mf_primary_offset == -1) return; /* Wait for offset... */ - if (server.cluster->mf_master_offset == replicationGetSlaveOffset()) { - /* Our replication offset matches the master replication offset + if (server.cluster->mf_primary_offset == replicationGetReplicaOffset()) { + /* Our replication offset matches the primary replication offset * announced after clients were paused. We can start the failover. */ server.cluster->mf_can_start = 1; - serverLog(LL_NOTICE, "All master replication stream processed, " + serverLog(LL_NOTICE, "All primary replication stream processed, " "manual failover can start."); clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); return; @@ -4632,9 +4634,9 @@ void clusterCron(void) { dictIterator *di; dictEntry *de; int update_state = 0; - int orphaned_masters; /* How many masters there are without ok slaves. */ - int max_slaves; /* Max number of ok slaves for a single master. */ - int this_slaves; /* Number of ok slaves for our master (if we are slave). */ + int orphaned_primaries; /* How many primaries there are without ok replicas. */ + int max_replicas; /* Max number of ok replicas for a single primary. */ + int this_replicas; /* Number of ok replicas for our primary (if we are replica). 
*/ mstime_t min_pong = 0, now = mstime(); clusterNode *min_pong_node = NULL; static unsigned long long iteration = 0; @@ -4694,13 +4696,13 @@ void clusterCron(void) { /* Iterate nodes to check if we need to flag something as failing. * This loop is also responsible to: - * 1) Check if there are orphaned masters (masters without non failing - * slaves). - * 2) Count the max number of non failing slaves for a single master. - * 3) Count the number of slaves for our master, if we are a slave. */ - orphaned_masters = 0; - max_slaves = 0; - this_slaves = 0; + * 1) Check if there are orphaned primaries (primaries without non failing + * replicas). + * 2) Count the max number of non failing replicas for a single primary. + * 3) Count the number of replicas for our primary, if we are a replica. */ + orphaned_primaries = 0; + max_replicas = 0; + this_replicas = 0; di = dictGetSafeIterator(server.cluster->nodes); while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); @@ -4708,19 +4710,19 @@ void clusterCron(void) { if (node->flags & (CLUSTER_NODE_MYSELF | CLUSTER_NODE_NOADDR | CLUSTER_NODE_HANDSHAKE)) continue; - /* Orphaned master check, useful only if the current instance - * is a slave that may migrate to another master. */ - if (nodeIsSlave(myself) && clusterNodeIsMaster(node) && !nodeFailed(node)) { - int okslaves = clusterCountNonFailingSlaves(node); + /* Orphaned primary check, useful only if the current instance + * is a replica that may migrate to another primary. */ + if (nodeIsReplica(myself) && clusterNodeIsPrimary(node) && !nodeFailed(node)) { + int ok_replicas = clusterCountNonFailingReplicas(node); - /* A master is orphaned if it is serving a non-zero number of - * slots, have no working slaves, but used to have at least one - * slave, or failed over a master that used to have slaves. */ - if (okslaves == 0 && node->numslots > 0 && node->flags & CLUSTER_NODE_MIGRATE_TO) { - orphaned_masters++; + /* A primary is orphaned if it is serving a non-zero number of + * slots, have no working replicas, but used to have at least one + * replica, or failed over a primary that used to have replicas. */ + if (ok_replicas == 0 && node->numslots > 0 && node->flags & CLUSTER_NODE_MIGRATE_TO) { + orphaned_primaries++; } - if (okslaves > max_slaves) max_slaves = okslaves; - if (myself->slaveof == node) this_slaves = okslaves; + if (ok_replicas > max_replicas) max_replicas = ok_replicas; + if (myself->replicaof == node) this_replicas = ok_replicas; } /* If we are not receiving any data for more than half the cluster @@ -4750,9 +4752,10 @@ void clusterCron(void) { continue; } - /* If we are a master and one of the slaves requested a manual + /* If we are a primary and one of the replicas requested a manual * failover, ping it continuously. 
*/ - if (server.cluster->mf_end && clusterNodeIsMaster(myself) && server.cluster->mf_slave == node && node->link) { + if (server.cluster->mf_end && clusterNodeIsPrimary(myself) && server.cluster->mf_replica == node && + node->link) { clusterSendPing(node->link, CLUSTERMSG_TYPE_PING); continue; } @@ -4776,7 +4779,7 @@ void clusterCron(void) { if (!(node->flags & (CLUSTER_NODE_PFAIL | CLUSTER_NODE_FAIL))) { node->flags |= CLUSTER_NODE_PFAIL; update_state = 1; - if (clusterNodeIsMaster(myself) && server.cluster->size == 1) { + if (clusterNodeIsPrimary(myself) && server.cluster->size == 1) { markNodeAsFailingIfNeeded(node); } else { serverLog(LL_DEBUG, "*** NODE %.40s possibly failing", node->name); @@ -4786,26 +4789,27 @@ void clusterCron(void) { } dictReleaseIterator(di); - /* If we are a slave node but the replication is still turned off, - * enable it if we know the address of our master and it appears to + /* If we are a replica node but the replication is still turned off, + * enable it if we know the address of our primary and it appears to * be up. */ - if (nodeIsSlave(myself) && server.masterhost == NULL && myself->slaveof && nodeHasAddr(myself->slaveof)) { - replicationSetMaster(myself->slaveof->ip, getNodeDefaultReplicationPort(myself->slaveof)); + if (nodeIsReplica(myself) && server.primary_host == NULL && myself->replicaof && nodeHasAddr(myself->replicaof)) { + replicationSetPrimary(myself->replicaof->ip, getNodeDefaultReplicationPort(myself->replicaof)); } /* Abort a manual failover if the timeout is reached. */ manualFailoverCheckTimeout(); - if (nodeIsSlave(myself)) { + if (nodeIsReplica(myself)) { clusterHandleManualFailover(); - if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleSlaveFailover(); - /* If there are orphaned slaves, and we are a slave among the masters - * with the max number of non-failing slaves, consider migrating to - * the orphaned masters. Note that it does not make sense to try - * a migration if there is no master with at least *two* working - * slaves. */ - if (orphaned_masters && max_slaves >= 2 && this_slaves == max_slaves && server.cluster_allow_replica_migration) - clusterHandleSlaveMigration(max_slaves); + if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleReplicaFailover(); + /* If there are orphaned replicas, and we are a replica among the primaries + * with the max number of non-failing replicas, consider migrating to + * the orphaned primaries. Note that it does not make sense to try + * a migration if there is no primary with at least *two* working + * replicas. 
*/ + if (orphaned_primaries && max_replicas >= 2 && this_replicas == max_replicas && + server.cluster_allow_replica_migration) + clusterHandleReplicaMigration(max_replicas); } if (update_state || server.cluster->state == CLUSTER_FAIL) clusterUpdateState(); @@ -4826,14 +4830,14 @@ void clusterBeforeSleep(void) { if (flags & CLUSTER_TODO_HANDLE_MANUALFAILOVER) { /* Handle manual failover as soon as possible so that won't have a 100ms * as it was handled only in clusterCron */ - if (nodeIsSlave(myself)) { + if (nodeIsReplica(myself)) { clusterHandleManualFailover(); - if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleSlaveFailover(); + if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleReplicaFailover(); } } else if (flags & CLUSTER_TODO_HANDLE_FAILOVER) { /* Handle failover, this is needed when it is likely that there is already - * the quorum from masters in order to react fast. */ - clusterHandleSlaveFailover(); + * the quorum from primaries in order to react fast. */ + clusterHandleReplicaFailover(); } /* Update the cluster state. */ @@ -4847,6 +4851,9 @@ void clusterBeforeSleep(void) { } void clusterDoBeforeSleep(int flags) { + /* Clear the cache if there are config changes here. */ + if (flags & CLUSTER_TODO_SAVE_CONFIG) clearCachedClusterSlotsResponse(); + server.cluster->todo_before_sleep |= flags; } @@ -4876,21 +4883,21 @@ void bitmapClearBit(unsigned char *bitmap, int pos) { bitmap[byte] &= ~(1 << bit); } -/* Return non-zero if there is at least one master with slaves in the cluster. +/* Return non-zero if there is at least one primary with replicas in the cluster. * Otherwise zero is returned. Used by clusterNodeSetSlotBit() to set the - * MIGRATE_TO flag the when a master gets the first slot. */ -int clusterMastersHaveSlaves(void) { - dictIterator *di = dictGetSafeIterator(server.cluster->nodes); + * MIGRATE_TO flag the when a primary gets the first slot. */ +int clusterPrimariesHaveReplicas(void) { + dictIterator di; + dictInitIterator(&di, server.cluster->nodes); dictEntry *de; - int slaves = 0; - while ((de = dictNext(di)) != NULL) { + int replicas = 0; + while ((de = dictNext(&di)) != NULL) { clusterNode *node = dictGetVal(de); - if (nodeIsSlave(node)) continue; - slaves += node->numslaves; + if (nodeIsReplica(node)) continue; + replicas += node->num_replicas; } - dictReleaseIterator(di); - return slaves != 0; + return replicas != 0; } /* Set the slot bit and return the old value. */ @@ -4899,20 +4906,20 @@ int clusterNodeSetSlotBit(clusterNode *n, int slot) { if (!old) { bitmapSetBit(n->slots, slot); n->numslots++; - /* When a master gets its first slot, even if it has no slaves, - * it gets flagged with MIGRATE_TO, that is, the master is a valid + /* When a primary gets its first slot, even if it has no replicas, + * it gets flagged with MIGRATE_TO, that is, the primary is a valid * target for replicas migration, if and only if at least one of - * the other masters has slaves right now. + * the other primaries has replicas right now. * - * Normally masters are valid targets of replica migration if: - * 1. The used to have slaves (but no longer have). - * 2. They are slaves failing over a master that used to have slaves. + * Normally primaries are valid targets of replica migration if: + * 1. The used to have replicas (but no longer have). + * 2. They are replicas failing over a primary that used to have replicas. 
* - * However new masters with slots assigned are considered valid - * migration targets if the rest of the cluster is not a slave-less. + * However new primaries with slots assigned are considered valid + * migration targets if the rest of the cluster is not a replica-less. * * See https://github.com/redis/redis/issues/3043 for more info. */ - if (n->numslots == 1 && clusterMastersHaveSlaves()) n->flags |= CLUSTER_NODE_MIGRATE_TO; + if (n->numslots == 1 && clusterPrimariesHaveReplicas()) n->flags |= CLUSTER_NODE_MIGRATE_TO; } return old; } @@ -4952,7 +4959,7 @@ int clusterDelSlot(int slot) { if (!n) return C_ERR; - /* Cleanup the channels in master/replica as part of slot deletion. */ + /* Cleanup the channels in primary/replica as part of slot deletion. */ removeChannelsInSlot(slot); /* Clear the slot bit. */ serverAssert(clusterNodeClearSlotBit(n, slot) == 1); @@ -4977,7 +4984,7 @@ int clusterDelNodeSlots(clusterNode *node) { } /* Clear the migrating / importing state for all the slots. - * This is useful at initialization and when turning a master into slave. */ + * This is useful at initialization and when turning a primary into replica. */ void clusterCloseAllSlots(void) { memset(server.cluster->migrating_slots_to, 0, sizeof(server.cluster->migrating_slots_to)); memset(server.cluster->importing_slots_from, 0, sizeof(server.cluster->importing_slots_from)); @@ -4997,20 +5004,20 @@ void clusterCloseAllSlots(void) { void clusterUpdateState(void) { int j, new_state; - int reachable_masters = 0; + int reachable_primaries = 0; static mstime_t among_minority_time; static mstime_t first_call_time = 0; server.cluster->todo_before_sleep &= ~CLUSTER_TODO_UPDATE_STATE; - /* If this is a master node, wait some time before turning the state + /* If this is a primary node, wait some time before turning the state * into OK, since it is not a good idea to rejoin the cluster as a writable - * master, after a reboot, without giving the cluster a chance to + * primary, after a reboot, without giving the cluster a chance to * reconfigure this node. Note that the delay is calculated starting from * the first call to this function and not since the server start, in order * to not count the DB loading time. */ if (first_call_time == 0) first_call_time = mstime(); - if (clusterNodeIsMaster(myself) && server.cluster->state == CLUSTER_FAIL && + if (clusterNodeIsPrimary(myself) && server.cluster->state == CLUSTER_FAIL && mstime() - first_call_time < CLUSTER_WRITABLE_DELAY) return; @@ -5028,10 +5035,10 @@ void clusterUpdateState(void) { } } - /* Compute the cluster size, that is the number of master nodes + /* Compute the cluster size, that is the number of primary nodes * serving at least a single slot. * - * At the same time count the number of reachable masters having + * At the same time count the number of reachable primaries having * at least one slot. 
*/ { dictIterator *di; @@ -5042,9 +5049,9 @@ void clusterUpdateState(void) { while ((de = dictNext(di)) != NULL) { clusterNode *node = dictGetVal(de); - if (clusterNodeIsMaster(node) && node->numslots) { + if (clusterNodeIsPrimary(node) && node->numslots) { server.cluster->size++; - if ((node->flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) == 0) reachable_masters++; + if ((node->flags & (CLUSTER_NODE_FAIL | CLUSTER_NODE_PFAIL)) == 0) reachable_primaries++; } } dictReleaseIterator(di); @@ -5055,7 +5062,7 @@ void clusterUpdateState(void) { { int needed_quorum = (server.cluster->size / 2) + 1; - if (reachable_masters < needed_quorum) { + if (reachable_primaries < needed_quorum) { new_state = CLUSTER_FAIL; among_minority_time = mstime(); } @@ -5065,14 +5072,14 @@ void clusterUpdateState(void) { if (new_state != server.cluster->state) { mstime_t rejoin_delay = server.cluster_node_timeout; - /* If the instance is a master and was partitioned away with the + /* If the instance is a primary and was partitioned away with the * minority, don't let it accept queries for some time after the * partition heals, to make sure there is enough time to receive * a configuration update. */ if (rejoin_delay > CLUSTER_MAX_REJOIN_DELAY) rejoin_delay = CLUSTER_MAX_REJOIN_DELAY; if (rejoin_delay < CLUSTER_MIN_REJOIN_DELAY) rejoin_delay = CLUSTER_MIN_REJOIN_DELAY; - if (new_state == CLUSTER_OK && clusterNodeIsMaster(myself) && mstime() - among_minority_time < rejoin_delay) { + if (new_state == CLUSTER_OK && clusterNodeIsPrimary(myself) && mstime() - among_minority_time < rejoin_delay) { return; } @@ -5105,12 +5112,12 @@ int verifyClusterConfigWithData(void) { int update_config = 0; /* Return ASAP if a module disabled cluster redirections. In that case - * every master can store keys about every possible hash slot. */ + * every primary can store keys about every possible hash slot. */ if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return C_OK; - /* If this node is a slave, don't perform the check at all as we + /* If this node is a replica, don't perform the check at all as we * completely depend on the replication stream. */ - if (nodeIsSlave(myself)) return C_OK; + if (nodeIsReplica(myself)) return C_OK; /* Make sure we only have keys in DB0. */ for (j = 1; j < server.dbnum; j++) { @@ -5163,35 +5170,35 @@ int verifyClusterConfigWithData(void) { /* Remove all the shard channel related information not owned by the current shard. */ static inline void removeAllNotOwnedShardChannelSubscriptions(void) { if (!kvstoreSize(server.pubsubshard_channels)) return; - clusterNode *currmaster = clusterNodeIsMaster(myself) ? myself : myself->slaveof; + clusterNode *cur_primary = clusterNodeIsPrimary(myself) ? myself : myself->replicaof; for (int j = 0; j < CLUSTER_SLOTS; j++) { - if (server.cluster->slots[j] != currmaster) { + if (server.cluster->slots[j] != cur_primary) { removeChannelsInSlot(j); } } } /* ----------------------------------------------------------------------------- - * SLAVE nodes handling + * REPLICA nodes handling * -------------------------------------------------------------------------- */ -/* Set the specified node 'n' as master for this node. - * If this node is currently a master, it is turned into a slave. */ -void clusterSetMaster(clusterNode *n, int closeSlots) { +/* Set the specified node 'n' as primary for this node. + * If this node is currently a primary, it is turned into a replica. 
*/ +void clusterSetPrimary(clusterNode *n, int closeSlots) { serverAssert(n != myself); serverAssert(myself->numslots == 0); - if (clusterNodeIsMaster(myself)) { - myself->flags &= ~(CLUSTER_NODE_MASTER | CLUSTER_NODE_MIGRATE_TO); - myself->flags |= CLUSTER_NODE_SLAVE; + if (clusterNodeIsPrimary(myself)) { + myself->flags &= ~(CLUSTER_NODE_PRIMARY | CLUSTER_NODE_MIGRATE_TO); + myself->flags |= CLUSTER_NODE_REPLICA; } else { - if (myself->slaveof) clusterNodeRemoveSlave(myself->slaveof, myself); + if (myself->replicaof) clusterNodeRemoveReplica(myself->replicaof, myself); } if (closeSlots) clusterCloseAllSlots(); - myself->slaveof = n; + myself->replicaof = n; updateShardId(myself, n->shard_id); - clusterNodeAddSlave(n, myself); - replicationSetMaster(n->ip, getNodeDefaultReplicationPort(n)); + clusterNodeAddReplica(n, myself); + replicationSetPrimary(n->ip, getNodeDefaultReplicationPort(n)); removeAllNotOwnedShardChannelSubscriptions(); resetManualFailover(); } @@ -5206,8 +5213,8 @@ struct clusterNodeFlags { }; static struct clusterNodeFlags clusterNodeFlagsTable[] = { - {CLUSTER_NODE_MYSELF, "myself,"}, {CLUSTER_NODE_MASTER, "master,"}, - {CLUSTER_NODE_SLAVE, "slave,"}, {CLUSTER_NODE_PFAIL, "fail?,"}, + {CLUSTER_NODE_MYSELF, "myself,"}, {CLUSTER_NODE_PRIMARY, "master,"}, + {CLUSTER_NODE_REPLICA, "slave,"}, {CLUSTER_NODE_PFAIL, "fail?,"}, {CLUSTER_NODE_FAIL, "fail,"}, {CLUSTER_NODE_HANDSHAKE, "handshake,"}, {CLUSTER_NODE_NOADDR, "noaddr,"}, {CLUSTER_NODE_NOFAILOVER, "nofailover,"}}; @@ -5278,10 +5285,10 @@ sds clusterGenNodeDescription(client *c, clusterNode *node, int tls_primary) { ci = sdscatlen(ci, " ", 1); ci = representClusterNodeFlags(ci, node->flags); - /* Slave of... or just "-" */ + /* Replica of... or just "-" */ ci = sdscatlen(ci, " ", 1); - if (node->slaveof) - ci = sdscatlen(ci, node->slaveof->name, CLUSTER_NAMELEN); + if (node->replicaof) + ci = sdscatlen(ci, node->replicaof->name, CLUSTER_NAMELEN); else ci = sdscatlen(ci, "-", 1); @@ -5541,7 +5548,7 @@ void clusterUpdateSlots(client *c, unsigned char *slots, int del) { long long getNodeReplicationOffset(clusterNode *node) { if (node->flags & CLUSTER_NODE_MYSELF) { - return nodeIsSlave(node) ? replicationGetSlaveOffset() : server.master_repl_offset; + return nodeIsReplica(node) ? replicationGetReplicaOffset() : server.primary_repl_offset; } else { return node->repl_offset; } @@ -5584,7 +5591,7 @@ void addNodeDetailsToShardReply(client *c, clusterNode *node) { long long node_offset = getNodeReplicationOffset(node); addReplyBulkCString(c, "role"); - addReplyBulkCString(c, nodeIsSlave(node) ? "replica" : "master"); + addReplyBulkCString(c, nodeIsReplica(node) ? 
"replica" : "master"); reply_count++; addReplyBulkCString(c, "replication-offset"); @@ -5595,7 +5602,7 @@ void addNodeDetailsToShardReply(client *c, clusterNode *node) { const char *health_msg = NULL; if (nodeFailed(node)) { health_msg = "fail"; - } else if (nodeIsSlave(node) && node_offset == 0) { + } else if (nodeIsReplica(node) && node_offset == 0) { health_msg = "loading"; } else { health_msg = "online"; @@ -5614,7 +5621,7 @@ void addShardReplyForClusterShards(client *c, list *nodes) { addReplyBulkCString(c, "slots"); /* Use slot_info_pairs from the primary only */ - n = clusterNodeGetMaster(n); + n = clusterNodeGetPrimary(n); if (n->slot_info_pairs != NULL) { serverAssert((n->slot_info_pairs_count % 2) == 0); @@ -5767,10 +5774,10 @@ int getClusterSize(void) { } int getMyShardSlotCount(void) { - if (!nodeIsSlave(server.cluster->myself)) { + if (!nodeIsReplica(server.cluster->myself)) { return server.cluster->myself->numslots; - } else if (server.cluster->myself->slaveof) { - return server.cluster->myself->slaveof->numslots; + } else if (server.cluster->myself->replicaof) { + return server.cluster->myself->replicaof->numslots; } else { return 0; } @@ -5796,8 +5803,8 @@ char **getClusterNodesList(size_t *numnodes) { return ids; } -int clusterNodeIsMaster(clusterNode *n) { - return n->flags & CLUSTER_NODE_MASTER; +int clusterNodeIsPrimary(clusterNode *n) { + return n->flags & CLUSTER_NODE_PRIMARY; } int handleDebugClusterCommand(client *c) { @@ -5841,12 +5848,12 @@ char *clusterNodeIp(clusterNode *node) { return node->ip; } -int clusterNodeIsSlave(clusterNode *node) { - return node->flags & CLUSTER_NODE_SLAVE; +int clusterNodeIsReplica(clusterNode *node) { + return node->flags & CLUSTER_NODE_REPLICA; } -clusterNode *clusterNodeGetMaster(clusterNode *node) { - while (node->slaveof != NULL) node = node->slaveof; +clusterNode *clusterNodeGetPrimary(clusterNode *node) { + while (node->replicaof != NULL) node = node->replicaof; return node; } @@ -5878,39 +5885,30 @@ char *clusterNodeGetShardId(clusterNode *node) { return node->shard_id; } -int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, int *timeout_out) { +/* clusterParseSetSlotCommand validates the arguments of the CLUSTER SETSLOT command, + * extracts the target slot number (slot_out), and determines the target node (node_out) + * if applicable. It also calculates a timeout value (timeout_out) based on an optional + * timeout argument. If provided, the timeout is added to the current time to obtain an + * absolute timestamp; if omitted, the default timeout CLUSTER_OPERATION_TIMEOUT is used; + * if set to 0, it indicates no timeout. The function returns 1 if successful, and 0 + * otherwise, after sending an error message to the client. 
*/ +int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, mstime_t *timeout_out) { int slot = -1; clusterNode *n = NULL; - int timeout = 0; + mstime_t timeout = commandTimeSnapshot() + CLUSTER_OPERATION_TIMEOUT; + int optarg_pos = 0; /* Allow primaries to replicate "CLUSTER SETSLOT" */ - if (!(c->flags & CLIENT_MASTER) && nodeIsSlave(myself)) { + if (!(c->flags & CLIENT_PRIMARY) && nodeIsReplica(myself)) { addReplyError(c, "Please use SETSLOT only with masters."); return 0; } - /* Process optional arguments */ - for (int i = 0; i < c->argc;) { - if (!strcasecmp(c->argv[i]->ptr, "timeout")) { - if (i + 1 < c->argc) { - timeout = (int)strtol(c->argv[i + 1]->ptr, NULL, 10); - decrRefCount(c->argv[i]); - decrRefCount(c->argv[i + 1]); - memmove(&c->argv[i], &c->argv[i + 2], c->argc - i - 2); - c->argc -= 2; - continue; - } - addReplyError(c, "Missing timeout value."); - return 0; - } - i++; - } - if ((slot = getSlotOrReply(c, c->argv[2])) == -1) return 0; if (!strcasecmp(c->argv[3]->ptr, "migrating") && c->argc >= 5) { - /* Scope the check to primaries only */ - if (nodeIsMaster(myself) && server.cluster->slots[slot] != myself) { + /* CLUSTER SETSLOT MIGRATING */ + if (nodeIsPrimary(myself) && server.cluster->slots[slot] != myself) { addReplyErrorFormat(c, "I'm not the owner of hash slot %u", slot); return 0; } @@ -5919,11 +5917,13 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "I don't know about node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a master"); return 0; } + if (c->argc > 5) optarg_pos = 5; } else if (!strcasecmp(c->argv[3]->ptr, "importing") && c->argc >= 5) { + /* CLUSTER SETSLOT IMPORTING */ if (server.cluster->slots[slot] == myself) { addReplyErrorFormat(c, "I'm already the owner of hash slot %u", slot); return 0; @@ -5933,12 +5933,14 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "I don't know about node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a master"); return 0; } + if (c->argc > 5) optarg_pos = 5; } else if (!strcasecmp(c->argv[3]->ptr, "stable") && c->argc >= 4) { - /* Do nothing */ + /* CLUSTER SETSLOT STABLE */ + if (c->argc > 4) optarg_pos = 4; } else if (!strcasecmp(c->argv[3]->ptr, "node") && c->argc >= 5) { /* CLUSTER SETSLOT NODE */ n = clusterLookupNode(c->argv[4]->ptr, sdslen(c->argv[4]->ptr)); @@ -5946,7 +5948,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, addReplyErrorFormat(c, "Unknown node %s", (char *)c->argv[4]->ptr); return 0; } - if (nodeIsSlave(n)) { + if (nodeIsReplica(n)) { addReplyError(c, "Target node is not a master"); return 0; } @@ -5961,11 +5963,23 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, return 0; } } + if (c->argc > 5) optarg_pos = 5; } else { addReplyError(c, "Invalid CLUSTER SETSLOT action or number of arguments. 
Try CLUSTER HELP"); return 0; } + /* Process optional arguments */ + for (int i = optarg_pos; i < c->argc; i++) { + if (!strcasecmp(c->argv[i]->ptr, "timeout")) { + if (i + 1 >= c->argc) { + addReplyError(c, "Missing timeout value"); + return 0; + } + if (getTimeoutFromObjectOrReply(c, c->argv[i + 1], &timeout, UNIT_MILLISECONDS) != C_OK) return 0; + } + } + *slot_out = slot; *node_out = n; *timeout_out = timeout; @@ -5974,7 +5988,7 @@ int clusterParseSetSlotCommand(client *c, int *slot_out, clusterNode **node_out, void clusterCommandSetSlot(client *c) { int slot; - int timeout_ms; + mstime_t timeout_ms; clusterNode *n; if (!clusterParseSetSlotCommand(c, &slot, &n, &timeout_ms)) return; @@ -6009,21 +6023,20 @@ void clusterCommandSetSlot(client *c) { * This ensures that all replicas have the latest topology information, enabling * a reliable slot ownership transfer even if the primary node went down during * the process. */ - if (nodeIsMaster(myself) && myself->numslaves != 0 && (c->flags & CLIENT_PREREPL_DONE) == 0) { + if (nodeIsPrimary(myself) && myself->num_replicas != 0 && (c->flags & CLIENT_REPLICATION_DONE) == 0) { forceCommandPropagation(c, PROPAGATE_REPL); /* We are a primary and this is the first time we see this `SETSLOT` * command. Force-replicate the command to all of our replicas * first and only on success will we handle the command. * Note that * 1. All replicas are expected to ack the replication within the given timeout - * 2. The repl offset target is set to the master's current repl offset + 1. + * 2. The repl offset target is set to the primary's current repl offset + 1. * There is no concern of partial replication because replicas always * ack the repl offset at the command boundary. */ - if (timeout_ms == 0) { - timeout_ms = CLUSTER_OPERATION_TIMEOUT; - } - blockForPreReplication(c, mstime() + timeout_ms, server.master_repl_offset + 1, myself->numslaves); - replicationRequestAckFromSlaves(); + blockClientForReplicaAck(c, timeout_ms, server.primary_repl_offset + 1, myself->num_replicas, 0); + /* Mark client as pending command for execution after replication to replicas. */ + c->flags |= CLIENT_PENDING_COMMAND; + replicationRequestAckFromReplicas(); return; } @@ -6056,20 +6069,20 @@ void clusterCommandSetSlot(client *c) { clusterDelSlot(slot); clusterAddSlot(n, slot); - /* If we are a master left without slots, we should turn into a - * replica of the new master. */ + /* If we are a primary left without slots, we should turn into a + * replica of the new primary. */ if (slot_was_mine && n != myself && myself->numslots == 0 && server.cluster_allow_replica_migration) { serverLog(LL_NOTICE, "Lost my last slot during slot migration. Reconfiguring myself " "as a replica of %.40s (%s) in shard %.40s", n->name, n->human_nodename, n->shard_id); - clusterSetMaster(n, 1); + clusterSetPrimary(n, 1); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } /* If this node or this node's primary was importing this slot, * assigning the slot to itself also clears the importing status. 
*/ - if ((n == myself || n == myself->slaveof) && server.cluster->importing_slots_from[slot]) { + if ((n == myself || n == myself->replicaof) && server.cluster->importing_slots_from[slot]) { server.cluster->importing_slots_from[slot] = NULL; /* Only primary broadcasts the updates */ @@ -6230,7 +6243,7 @@ int clusterCommandSpecial(client *c) { } else if (n == myself) { addReplyError(c, "I tried hard but I can't forget myself..."); return 1; - } else if (nodeIsSlave(myself) && myself->slaveof == n) { + } else if (nodeIsReplica(myself) && myself->replicaof == n) { addReplyError(c, "Can't forget my master!"); return 1; } @@ -6253,23 +6266,23 @@ int clusterCommandSpecial(client *c) { return 1; } - /* Can't replicate a slave. */ - if (nodeIsSlave(n)) { + /* Can't replicate a replica. */ + if (nodeIsReplica(n)) { addReplyError(c, "I can only replicate a master, not a replica."); return 1; } - /* If the instance is currently a master, it should have no assigned + /* If the instance is currently a primary, it should have no assigned * slots nor keys to accept to replicate some other node. - * Slaves can switch to another master without issues. */ - if (clusterNodeIsMaster(myself) && (myself->numslots != 0 || kvstoreSize(server.db[0].keys) != 0)) { + * Replicas can switch to another primary without issues. */ + if (clusterNodeIsPrimary(myself) && (myself->numslots != 0 || kvstoreSize(server.db[0].keys) != 0)) { addReplyError(c, "To set a master the node must be empty and " "without assigned slots."); return 1; } - /* Set the master. */ - clusterSetMaster(n, 1); + /* Set the primary. */ + clusterSetPrimary(n, 1); clusterBroadcastPong(CLUSTER_BROADCAST_ALL); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); addReply(c, shared.ok); @@ -6300,13 +6313,13 @@ int clusterCommandSpecial(client *c) { } /* Check preconditions. */ - if (clusterNodeIsMaster(myself)) { + if (clusterNodeIsPrimary(myself)) { addReplyError(c, "You should send CLUSTER FAILOVER to a replica"); return 1; - } else if (myself->slaveof == NULL) { + } else if (myself->replicaof == NULL) { addReplyError(c, "I'm a replica but my master is unknown to me"); return 1; - } else if (!force && (nodeFailed(myself->slaveof) || myself->slaveof->link == NULL)) { + } else if (!force && (nodeFailed(myself->replicaof) || myself->replicaof->link == NULL)) { addReplyError(c, "Master is down or failed, " "please use CLUSTER FAILOVER FORCE"); return 1; @@ -6317,20 +6330,20 @@ int clusterCommandSpecial(client *c) { if (takeover) { /* A takeover does not perform any initial check. It just * generates a new configuration epoch for this node without - * consensus, claims the master's slots, and broadcast the new + * consensus, claims the primary's slots, and broadcast the new * configuration. */ - serverLog(LL_NOTICE, "Taking over the master (user request)."); + serverLog(LL_NOTICE, "Taking over the primary (user request)."); clusterBumpConfigEpochWithoutConsensus(); - clusterFailoverReplaceYourMaster(); + clusterFailoverReplaceYourPrimary(); } else if (force) { /* If this is a forced failover, we don't need to talk with our - * master to agree about the offset. We just failover taking over + * primary to agree about the offset. We just failover taking over * it without coordination. 
*/ serverLog(LL_NOTICE, "Forced failover user request accepted."); server.cluster->mf_can_start = 1; } else { serverLog(LL_NOTICE, "Manual failover user request accepted."); - clusterSendMFStart(myself->slaveof); + clusterSendMFStart(myself->replicaof); } addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "set-config-epoch") && c->argc == 3) { @@ -6380,9 +6393,9 @@ int clusterCommandSpecial(client *c) { } } - /* Slaves can be reset while containing data, but not master nodes + /* Replicas can be reset while containing data, but not primary nodes * that must be empty. */ - if (clusterNodeIsMaster(myself) && kvstoreSize(c->db->keys) != 0) { + if (clusterNodeIsPrimary(myself) && kvstoreSize(c->db->keys) != 0) { addReplyError(c, "CLUSTER RESET can't be called with " "master nodes containing keys"); return 1; @@ -6439,12 +6452,12 @@ const char **clusterCommandExtendedHelp(void) { return help; } -int clusterNodeNumSlaves(clusterNode *node) { - return node->numslaves; +int clusterNodeNumReplicas(clusterNode *node) { + return node->num_replicas; } -clusterNode *clusterNodeGetSlave(clusterNode *node, int slave_idx) { - return node->slaves[slave_idx]; +clusterNode *clusterNodeGetReplica(clusterNode *node, int replica_idx) { + return node->replicas[replica_idx]; } clusterNode *getMigratingSlotDest(int slot) { @@ -6490,13 +6503,13 @@ int clusterAllowFailoverCmd(client *c) { return 0; } -void clusterPromoteSelfToMaster(void) { - replicationUnsetMaster(); +void clusterPromoteSelfToPrimary(void) { + replicationUnsetPrimary(); } int detectAndUpdateCachedNodeHealth(void) { dictIterator di; - dictInitSafeIterator(&di, server.cluster->nodes); + dictInitIterator(&di, server.cluster->nodes); dictEntry *de; clusterNode *node; int overall_health_changed = 0; @@ -6512,39 +6525,82 @@ int detectAndUpdateCachedNodeHealth(void) { return overall_health_changed; } -/* Replicate migrating and importing slot states to all replicas */ -void clusterReplicateOpenSlots(void) { - if (!server.cluster_enabled) return; +/* Encode open slot states into an sds string to be persisted as an aux field in RDB. */ +sds clusterEncodeOpenSlotsAuxField(int rdbflags) { + if (!server.cluster_enabled) return NULL; - int argc = 5; - robj **argv = zmalloc(sizeof(robj *) * argc); + /* Open slots should not be persisted to an RDB file. This data is intended only for full sync. */ + if ((rdbflags & RDBFLAGS_REPLICATION) == 0) return NULL; - argv[0] = shared.cluster; - argv[1] = shared.setslot; + sds s = NULL; for (int i = 0; i < 2; i++) { - clusterNode **nodes_ptr = NULL; + clusterNode **nodes_ptr; if (i == 0) { nodes_ptr = server.cluster->importing_slots_from; - argv[3] = shared.importing; } else { nodes_ptr = server.cluster->migrating_slots_to; - argv[3] = shared.migrating; } for (int j = 0; j < CLUSTER_SLOTS; j++) { if (nodes_ptr[j] == NULL) continue; + if (s == NULL) s = sdsempty(); + s = sdscatfmt(s, "%i%s", j, (i == 0) ? "<" : ">"); + s = sdscatlen(s, nodes_ptr[j]->name, CLUSTER_NAMELEN); + s = sdscatlen(s, ",", 1); + } + } + + return s; +} + +/* Decode the open slot aux field and restore the in-memory slot states. */ +int clusterDecodeOpenSlotsAuxField(int rdbflags, sds s) { + if (!server.cluster_enabled || s == NULL) return C_OK; + + /* Open slots should not be loaded from a persisted RDB file, but only from a full sync. 
*/ + if ((rdbflags & RDBFLAGS_REPLICATION) == 0) return C_OK; - argv[2] = createStringObjectFromLongLongForValue(j); - sds name = sdsnewlen(nodes_ptr[j]->name, sizeof(nodes_ptr[j]->name)); - argv[4] = createObject(OBJ_STRING, name); + while (*s) { + /* Extract slot number */ + int slot = atoi(s); + if (slot < 0 || slot >= CLUSTER_SLOTS) return C_ERR; - replicationFeedSlaves(0, argv, argc); + while (*s && *s != '<' && *s != '>') s++; + if (*s != '<' && *s != '>') return C_ERR; - decrRefCount(argv[2]); - decrRefCount(argv[4]); + /* Determine if it's an importing or migrating slot */ + int is_importing = (*s == '<'); + s++; + + /* Extract the node name */ + char node_name[CLUSTER_NAMELEN]; + int k = 0; + while (*s && *s != ',' && k < CLUSTER_NAMELEN) { + node_name[k++] = *s++; + } + + /* Ensure the node name is of the correct length */ + if (k != CLUSTER_NAMELEN || *s != ',') return C_ERR; + + /* Move to the next slot */ + s++; + + /* Find the corresponding node */ + clusterNode *node = clusterLookupNode(node_name, CLUSTER_NAMELEN); + if (!node) { + /* Create a new node if not found */ + node = createClusterNode(node_name, 0); + clusterAddNode(node); + } + + /* Set the slot state */ + if (is_importing) { + server.cluster->importing_slots_from[slot] = node; + } else { + server.cluster->migrating_slots_to[slot] = node; } } - zfree(argv); + return C_OK; } diff --git a/src/cluster_legacy.h b/src/cluster_legacy.h index a5154bddb3..fb80f45eec 100644 --- a/src/cluster_legacy.h +++ b/src/cluster_legacy.h @@ -5,13 +5,13 @@ /* The following defines are amount of time, sometimes expressed as * multiplicators of the node timeout value (when ending with MULT). */ -#define CLUSTER_FAIL_REPORT_VALIDITY_MULT 2 /* Fail report validity. */ -#define CLUSTER_FAIL_UNDO_TIME_MULT 2 /* Undo fail if master is back. */ -#define CLUSTER_MF_TIMEOUT 5000 /* Milliseconds to do a manual failover. */ -#define CLUSTER_MF_PAUSE_MULT 2 /* Master pause manual failover mult. */ -#define CLUSTER_SLAVE_MIGRATION_DELAY 5000 /* Delay for slave migration. */ +#define CLUSTER_FAIL_REPORT_VALIDITY_MULT 2 /* Fail report validity. */ +#define CLUSTER_FAIL_UNDO_TIME_MULT 2 /* Undo fail if primary is back. */ +#define CLUSTER_MF_TIMEOUT 5000 /* Milliseconds to do a manual failover. */ +#define CLUSTER_MF_PAUSE_MULT 2 /* Primary pause manual failover mult. */ +#define CLUSTER_REPLICA_MIGRATION_DELAY 5000 /* Delay for replica migration. */ -/* Reasons why a slave is not able to failover. */ +/* Reasons why a replica is not able to failover. */ #define CLUSTER_CANT_FAILOVER_NONE 0 #define CLUSTER_CANT_FAILOVER_DATA_AGE 1 #define CLUSTER_CANT_FAILOVER_WAITING_DELAY 2 @@ -41,23 +41,23 @@ typedef struct clusterLink { } clusterLink; /* Cluster node flags and macros. */ -#define CLUSTER_NODE_MASTER (1 << 0) /* The node is a master */ -#define CLUSTER_NODE_SLAVE (1 << 1) /* The node is a slave */ +#define CLUSTER_NODE_PRIMARY (1 << 0) /* The node is a primary */ +#define CLUSTER_NODE_REPLICA (1 << 1) /* The node is a replica */ #define CLUSTER_NODE_PFAIL (1 << 2) /* Failure? 
Need acknowledge */ #define CLUSTER_NODE_FAIL (1 << 3) /* The node is believed to be malfunctioning */ #define CLUSTER_NODE_MYSELF (1 << 4) /* This node is myself */ #define CLUSTER_NODE_HANDSHAKE (1 << 5) /* We have still to exchange the first ping */ #define CLUSTER_NODE_NOADDR (1 << 6) /* We don't know the address of this node */ #define CLUSTER_NODE_MEET (1 << 7) /* Send a MEET message to this node */ -#define CLUSTER_NODE_MIGRATE_TO (1 << 8) /* Master eligible for replica migration. */ -#define CLUSTER_NODE_NOFAILOVER (1 << 9) /* Slave will not try to failover. */ +#define CLUSTER_NODE_MIGRATE_TO (1 << 8) /* Primary eligible for replica migration. */ +#define CLUSTER_NODE_NOFAILOVER (1 << 9) /* replica will not try to failover. */ #define CLUSTER_NODE_EXTENSIONS_SUPPORTED (1 << 10) /* This node supports extensions. */ #define CLUSTER_NODE_NULL_NAME \ "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" \ "\000\000\000\000\000\000\000\000\000\000\000\000" -#define nodeIsMaster(n) ((n)->flags & CLUSTER_NODE_MASTER) -#define nodeIsSlave(n) ((n)->flags & CLUSTER_NODE_SLAVE) +#define nodeIsPrimary(n) ((n)->flags & CLUSTER_NODE_PRIMARY) +#define nodeIsReplica(n) ((n)->flags & CLUSTER_NODE_REPLICA) #define nodeInHandshake(n) ((n)->flags & CLUSTER_NODE_HANDSHAKE) #define nodeHasAddr(n) (!((n)->flags & CLUSTER_NODE_NOADDR)) #define nodeTimedOut(n) ((n)->flags & CLUSTER_NODE_PFAIL) @@ -216,14 +216,14 @@ typedef struct { uint16_t type; /* Message type */ uint16_t count; /* Number of gossip sections. */ uint64_t currentEpoch; /* The epoch accordingly to the sending node. */ - uint64_t configEpoch; /* The config epoch if it's a master, or the last - epoch advertised by its master if it is a - slave. */ - uint64_t offset; /* Master replication offset if node is a master or - processed replication offset if node is a slave. */ + uint64_t configEpoch; /* The config epoch if it's a primary, or the last + epoch advertised by its primary if it is a + replica. */ + uint64_t offset; /* Primary replication offset if node is a primary or + processed replication offset if node is a replica. */ char sender[CLUSTER_NAMELEN]; /* Name of the sender node */ unsigned char myslots[CLUSTER_SLOTS / 8]; - char slaveof[CLUSTER_NAMELEN]; + char replicaof[CLUSTER_NAMELEN]; char myip[NET_IP_STR_LEN]; /* Sender IP, if not all zeroed. */ uint16_t extensions; /* Number of extensions sent along with this packet. */ char notused1[30]; /* 30 bytes reserved for future usage. */ @@ -256,7 +256,7 @@ static_assert(offsetof(clusterMsg, configEpoch) == 24, "unexpected field offset" static_assert(offsetof(clusterMsg, offset) == 32, "unexpected field offset"); static_assert(offsetof(clusterMsg, sender) == 40, "unexpected field offset"); static_assert(offsetof(clusterMsg, myslots) == 80, "unexpected field offset"); -static_assert(offsetof(clusterMsg, slaveof) == 2128, "unexpected field offset"); +static_assert(offsetof(clusterMsg, replicaof) == 2128, "unexpected field offset"); static_assert(offsetof(clusterMsg, myip) == 2168, "unexpected field offset"); static_assert(offsetof(clusterMsg, extensions) == 2214, "unexpected field offset"); static_assert(offsetof(clusterMsg, notused1) == 2216, "unexpected field offset"); @@ -271,10 +271,10 @@ static_assert(offsetof(clusterMsg, data) == 2256, "unexpected field offset"); /* Message flags better specify the packet content or are used to * provide some information about the node state. 
*/ -#define CLUSTERMSG_FLAG0_PAUSED (1 << 0) /* Master paused for manual failover. */ +#define CLUSTERMSG_FLAG0_PAUSED (1 << 0) /* Primary paused for manual failover. */ #define CLUSTERMSG_FLAG0_FORCEACK \ (1 << 1) /* Give ACK to AUTH_REQUEST even if \ - master is up. */ + primary is up. */ #define CLUSTERMSG_FLAG0_EXT_DATA (1 << 2) /* Message contains extension data */ struct _clusterNode { @@ -287,20 +287,20 @@ struct _clusterNode { uint16_t *slot_info_pairs; /* Slots info represented as (start/end) pair (consecutive index). */ int slot_info_pairs_count; /* Used number of slots in slot_info_pairs */ int numslots; /* Number of slots handled by this node */ - int numslaves; /* Number of slave nodes, if this is a master */ - clusterNode **slaves; /* pointers to slave nodes */ - clusterNode *slaveof; /* pointer to the master node. Note that it - may be NULL even if the node is a slave - if we don't have the master node in our - tables. */ + int num_replicas; /* Number of replica nodes, if this is a primary */ + clusterNode **replicas; /* pointers to replica nodes */ + clusterNode *replicaof; /* pointer to the primary node. Note that it + may be NULL even if the node is a replica + if we don't have the primary node in our + tables. */ unsigned long long last_in_ping_gossip; /* The number of the last carried in the ping gossip section */ mstime_t ping_sent; /* Unix time we sent latest ping */ mstime_t pong_received; /* Unix time we received the pong */ mstime_t data_received; /* Unix time we received any data */ mstime_t fail_time; /* Unix time when FAIL flag was set */ - mstime_t voted_time; /* Last time we voted for a slave of this master */ + mstime_t voted_time; /* Last time we voted for a replica of this primary */ mstime_t repl_offset_time; /* Unix time we received offset for this node */ - mstime_t orphaned_time; /* Starting time of orphaned master condition */ + mstime_t orphaned_time; /* Starting time of orphaned primary condition */ long long repl_offset; /* Last known repl offset for this node. */ char ip[NET_IP_STR_LEN]; /* Latest known IP address of this node */ sds hostname; /* The known hostname for this node */ @@ -319,32 +319,32 @@ struct clusterState { clusterNode *myself; /* This node */ uint64_t currentEpoch; int state; /* CLUSTER_OK, CLUSTER_FAIL, ... */ - int size; /* Num of master nodes with at least one slot */ + int size; /* Num of primary nodes with at least one slot */ dict *nodes; /* Hash table of name -> clusterNode structures */ dict *shards; /* Hash table of shard_id -> list (of nodes) structures */ dict *nodes_black_list; /* Nodes we don't re-add for a few seconds. */ clusterNode *migrating_slots_to[CLUSTER_SLOTS]; clusterNode *importing_slots_from[CLUSTER_SLOTS]; clusterNode *slots[CLUSTER_SLOTS]; - /* The following fields are used to take the slave state on elections. */ + /* The following fields are used to take the replica state on elections. */ mstime_t failover_auth_time; /* Time of previous or next election. */ int failover_auth_count; /* Number of votes received so far. */ int failover_auth_sent; /* True if we already asked for votes. */ - int failover_auth_rank; /* This slave rank for current auth request. */ + int failover_auth_rank; /* This replica rank for current auth request. */ uint64_t failover_auth_epoch; /* Epoch of the current election. */ - int cant_failover_reason; /* Why a slave is currently not able to + int cant_failover_reason; /* Why a replica is currently not able to failover. See the CANT_FAILOVER_* macros.
*/ /* Manual failover state in common. */ mstime_t mf_end; /* Manual failover time limit (ms unixtime). It is zero if there is no MF in progress. */ - /* Manual failover state of master. */ - clusterNode *mf_slave; /* Slave performing the manual failover. */ - /* Manual failover state of slave. */ - long long mf_master_offset; /* Master offset the slave needs to start MF + /* Manual failover state of primary. */ + clusterNode *mf_replica; /* replica performing the manual failover. */ + /* Manual failover state of replica. */ + long long mf_primary_offset; /* Primary offset the replica needs to start MF or -1 if still not received. */ - int mf_can_start; /* If non-zero signal that the manual failover - can start requesting masters vote. */ - /* The following fields are used by masters to take state on elections. */ + int mf_can_start; /* If non-zero signal that the manual failover + can start requesting primary vote. */ + /* The following fields are used by primaries to take state on elections. */ uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */ int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */ /* Stats */ diff --git a/src/commands.def b/src/commands.def index bc5a1261f2..06cdb4b87e 100644 --- a/src/commands.def +++ b/src/commands.def @@ -36,6 +36,7 @@ const char *commandGroupStr(int index) { /* BITCOUNT history */ commandHistory BITCOUNT_History[] = { {"7.0.0","Added the `BYTE|BIT` option."}, +{"8.0.0","`end` made optional; when called without argument the command reports the last BYTE."}, }; #endif @@ -51,23 +52,28 @@ keySpec BITCOUNT_Keyspecs[1] = { }; #endif -/* BITCOUNT range unit argument table */ -struct COMMAND_ARG BITCOUNT_range_unit_Subargs[] = { +/* BITCOUNT range end_unit_block unit argument table */ +struct COMMAND_ARG BITCOUNT_range_end_unit_block_unit_Subargs[] = { {MAKE_ARG("byte",ARG_TYPE_PURE_TOKEN,-1,"BYTE",NULL,NULL,CMD_ARG_NONE,0,NULL)}, {MAKE_ARG("bit",ARG_TYPE_PURE_TOKEN,-1,"BIT",NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; +/* BITCOUNT range end_unit_block argument table */ +struct COMMAND_ARG BITCOUNT_range_end_unit_block_Subargs[] = { +{MAKE_ARG("end",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("unit",ARG_TYPE_ONEOF,-1,NULL,NULL,"7.0.0",CMD_ARG_OPTIONAL,2,NULL),.subargs=BITCOUNT_range_end_unit_block_unit_Subargs}, +}; + /* BITCOUNT range argument table */ struct COMMAND_ARG BITCOUNT_range_Subargs[] = { {MAKE_ARG("start",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, -{MAKE_ARG("end",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, -{MAKE_ARG("unit",ARG_TYPE_ONEOF,-1,NULL,NULL,"7.0.0",CMD_ARG_OPTIONAL,2,NULL),.subargs=BITCOUNT_range_unit_Subargs}, +{MAKE_ARG("end-unit-block",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,2,NULL),.subargs=BITCOUNT_range_end_unit_block_Subargs}, }; /* BITCOUNT argument table */ struct COMMAND_ARG BITCOUNT_Args[] = { {MAKE_ARG("key",ARG_TYPE_KEY,0,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, -{MAKE_ARG("range",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,3,NULL),.subargs=BITCOUNT_range_Subargs}, +{MAKE_ARG("range",ARG_TYPE_BLOCK,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,2,NULL),.subargs=BITCOUNT_range_Subargs}, }; /********** BITFIELD ********************/ @@ -960,13 +966,13 @@ struct COMMAND_STRUCT CLUSTER_Subcommands[] = { {MAKE_CMD("forget","Removes a node from the nodes 
table.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_FORGET_History,0,CLUSTER_FORGET_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_FORGET_Keyspecs,0,NULL,1),.args=CLUSTER_FORGET_Args}, {MAKE_CMD("getkeysinslot","Returns the key names in a hash slot.","O(N) where N is the number of requested keys","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_GETKEYSINSLOT_History,0,CLUSTER_GETKEYSINSLOT_Tips,1,clusterCommand,4,CMD_STALE,0,CLUSTER_GETKEYSINSLOT_Keyspecs,0,NULL,2),.args=CLUSTER_GETKEYSINSLOT_Args}, {MAKE_CMD("help","Returns helpful text about the different subcommands.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_HELP_History,0,CLUSTER_HELP_Tips,0,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_HELP_Keyspecs,0,NULL,0)}, -{MAKE_CMD("info","Returns information about the state of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_INFO_History,0,CLUSTER_INFO_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_INFO_Keyspecs,0,NULL,0)}, +{MAKE_CMD("info","Returns information about the state of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_INFO_History,0,CLUSTER_INFO_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_INFO_Keyspecs,0,NULL,0)}, {MAKE_CMD("keyslot","Returns the hash slot for a key.","O(N) where N is the number of bytes in the key","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_KEYSLOT_History,0,CLUSTER_KEYSLOT_Tips,0,clusterCommand,3,CMD_STALE,0,CLUSTER_KEYSLOT_Keyspecs,0,NULL,1),.args=CLUSTER_KEYSLOT_Args}, {MAKE_CMD("links","Returns a list of all TCP links to and from peer nodes.","O(N) where N is the total number of Cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_LINKS_History,0,CLUSTER_LINKS_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_LINKS_Keyspecs,0,NULL,0)}, {MAKE_CMD("meet","Forces a node to handshake with another node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MEET_History,1,CLUSTER_MEET_Tips,0,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_MEET_Keyspecs,0,NULL,3),.args=CLUSTER_MEET_Args}, -{MAKE_CMD("myid","Returns the ID of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYID_History,0,CLUSTER_MYID_Tips,0,clusterCommand,2,CMD_STALE,0,CLUSTER_MYID_Keyspecs,0,NULL,0)}, -{MAKE_CMD("myshardid","Returns the shard ID of a node.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYSHARDID_History,0,CLUSTER_MYSHARDID_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_MYSHARDID_Keyspecs,0,NULL,0)}, -{MAKE_CMD("nodes","Returns the cluster configuration for a node.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_NODES_History,0,CLUSTER_NODES_Tips,1,clusterCommand,2,CMD_STALE,0,CLUSTER_NODES_Keyspecs,0,NULL,0)}, +{MAKE_CMD("myid","Returns the ID of a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYID_History,0,CLUSTER_MYID_Tips,0,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_MYID_Keyspecs,0,NULL,0)}, +{MAKE_CMD("myshardid","Returns the shard ID of a node.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_MYSHARDID_History,0,CLUSTER_MYSHARDID_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_MYSHARDID_Keyspecs,0,NULL,0)}, +{MAKE_CMD("nodes","Returns the cluster 
configuration for a node.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_NODES_History,0,CLUSTER_NODES_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_NODES_Keyspecs,0,NULL,0)}, {MAKE_CMD("replicas","Lists the replica nodes of a master node.","O(N) where N is the number of replicas.","5.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICAS_History,0,CLUSTER_REPLICAS_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICAS_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICAS_Args}, {MAKE_CMD("replicate","Configure a node as replica of a master node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_REPLICATE_History,0,CLUSTER_REPLICATE_Tips,0,clusterCommand,3,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE,0,CLUSTER_REPLICATE_Keyspecs,0,NULL,1),.args=CLUSTER_REPLICATE_Args}, {MAKE_CMD("reset","Resets a node.","O(N) where N is the number of known nodes. The command may execute a FLUSHALL as a side effect.","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_RESET_History,0,CLUSTER_RESET_Tips,0,clusterCommand,-2,CMD_ADMIN|CMD_STALE|CMD_NOSCRIPT,0,CLUSTER_RESET_Keyspecs,0,NULL,1),.args=CLUSTER_RESET_Args}, @@ -975,7 +981,7 @@ struct COMMAND_STRUCT CLUSTER_Subcommands[] = { {MAKE_CMD("setslot","Binds a hash slot to a node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SETSLOT_History,1,CLUSTER_SETSLOT_Tips,0,clusterCommand,-4,CMD_NO_ASYNC_LOADING|CMD_ADMIN|CMD_STALE|CMD_MAY_REPLICATE,0,CLUSTER_SETSLOT_Keyspecs,0,NULL,3),.args=CLUSTER_SETSLOT_Args}, {MAKE_CMD("shards","Returns the mapping of cluster slots to shards.","O(N) where N is the total number of cluster nodes","7.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SHARDS_History,0,CLUSTER_SHARDS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SHARDS_Keyspecs,0,NULL,0)}, {MAKE_CMD("slaves","Lists the replica nodes of a master node.","O(N) where N is the number of replicas.","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER REPLICAS`","5.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLAVES_History,0,CLUSTER_SLAVES_Tips,1,clusterCommand,3,CMD_ADMIN|CMD_STALE,0,CLUSTER_SLAVES_Keyspecs,0,NULL,1),.args=CLUSTER_SLAVES_Args}, -{MAKE_CMD("slots","Returns the mapping of cluster slots to nodes.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_DEPRECATED,"`CLUSTER SHARDS`","7.0.0","cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,2,CLUSTER_SLOTS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SLOTS_Keyspecs,0,NULL,0)}, +{MAKE_CMD("slots","Returns the mapping of cluster slots to nodes.","O(N) where N is the total number of Cluster nodes","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_SLOTS_History,2,CLUSTER_SLOTS_Tips,1,clusterCommand,2,CMD_LOADING|CMD_STALE,0,CLUSTER_SLOTS_Keyspecs,0,NULL,0)}, {0} }; @@ -10661,7 +10667,7 @@ struct COMMAND_ARG WATCH_Args[] = { /* Main command table */ struct COMMAND_STRUCT serverCommandTable[] = { /* bitmap */ -{MAKE_CMD("bitcount","Counts the number of set bits (population counting) in a string.","O(N)","2.6.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITCOUNT_History,1,BITCOUNT_Tips,0,bitcountCommand,-2,CMD_READONLY,ACL_CATEGORY_BITMAP,BITCOUNT_Keyspecs,1,NULL,2),.args=BITCOUNT_Args}, +{MAKE_CMD("bitcount","Counts the number of set bits (population counting) in a 
string.","O(N)","2.6.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITCOUNT_History,2,BITCOUNT_Tips,0,bitcountCommand,-2,CMD_READONLY,ACL_CATEGORY_BITMAP,BITCOUNT_Keyspecs,1,NULL,2),.args=BITCOUNT_Args}, {MAKE_CMD("bitfield","Performs arbitrary bitfield integer operations on strings.","O(1) for each subcommand specified","3.2.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITFIELD_History,0,BITFIELD_Tips,0,bitfieldCommand,-2,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_BITMAP,BITFIELD_Keyspecs,1,bitfieldGetKeys,2),.args=BITFIELD_Args}, {MAKE_CMD("bitfield_ro","Performs arbitrary read-only bitfield integer operations on strings.","O(1) for each subcommand specified","6.0.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITFIELD_RO_History,0,BITFIELD_RO_Tips,0,bitfieldroCommand,-2,CMD_READONLY|CMD_FAST,ACL_CATEGORY_BITMAP,BITFIELD_RO_Keyspecs,1,NULL,2),.args=BITFIELD_RO_Args}, {MAKE_CMD("bitop","Performs bitwise operations on multiple strings, and stores the result.","O(N)","2.6.0",CMD_DOC_NONE,NULL,NULL,"bitmap",COMMAND_GROUP_BITMAP,BITOP_History,0,BITOP_Tips,0,bitopCommand,-4,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_BITMAP,BITOP_Keyspecs,2,NULL,3),.args=BITOP_Args}, diff --git a/src/commands/bitcount.json b/src/commands/bitcount.json index 2d277a8551..ad90b39bad 100644 --- a/src/commands/bitcount.json +++ b/src/commands/bitcount.json @@ -10,6 +10,10 @@ [ "7.0.0", "Added the `BYTE|BIT` option." + ], + [ + "8.0.0", + "`end` made optional; when called without argument the command reports the last BYTE." ] ], "command_flags": [ @@ -54,24 +58,31 @@ "type": "integer" }, { - "name": "end", - "type": "integer" - }, - { - "name": "unit", - "type": "oneof", + "name": "end-unit-block", + "type": "block", "optional": true, - "since": "7.0.0", "arguments": [ { - "name": "byte", - "type": "pure-token", - "token": "BYTE" + "name": "end", + "type": "integer" }, { - "name": "bit", - "type": "pure-token", - "token": "BIT" + "name": "unit", + "type": "oneof", + "optional": true, + "since": "7.0.0", + "arguments": [ + { + "name": "byte", + "type": "pure-token", + "token": "BYTE" + }, + { + "name": "bit", + "type": "pure-token", + "token": "BIT" + } + ] } ] } diff --git a/src/commands/cluster-info.json b/src/commands/cluster-info.json index 2c88760eb7..023d9b46bb 100644 --- a/src/commands/cluster-info.json +++ b/src/commands/cluster-info.json @@ -8,6 +8,7 @@ "container": "CLUSTER", "function": "clusterCommand", "command_flags": [ + "LOADING", "STALE" ], "command_tips": [ diff --git a/src/commands/cluster-myid.json b/src/commands/cluster-myid.json index caa62de756..4ef1ff7de9 100644 --- a/src/commands/cluster-myid.json +++ b/src/commands/cluster-myid.json @@ -8,6 +8,7 @@ "container": "CLUSTER", "function": "clusterCommand", "command_flags": [ + "LOADING", "STALE" ], "reply_schema": { diff --git a/src/commands/cluster-myshardid.json b/src/commands/cluster-myshardid.json index 01c05ba926..0e08417eec 100644 --- a/src/commands/cluster-myshardid.json +++ b/src/commands/cluster-myshardid.json @@ -8,6 +8,7 @@ "container": "CLUSTER", "function": "clusterCommand", "command_flags": [ + "LOADING", "STALE" ], "command_tips": [ diff --git a/src/commands/cluster-nodes.json b/src/commands/cluster-nodes.json index 9c5fcbe9a4..e12bca36b2 100644 --- a/src/commands/cluster-nodes.json +++ b/src/commands/cluster-nodes.json @@ -8,6 +8,7 @@ "container": "CLUSTER", "function": "clusterCommand", "command_flags": [ + "LOADING", "STALE" ], "command_tips": [ diff --git a/src/commands/cluster-slots.json 
b/src/commands/cluster-slots.json index 13f8c26612..ca48f371ea 100644 --- a/src/commands/cluster-slots.json +++ b/src/commands/cluster-slots.json @@ -7,11 +7,6 @@ "arity": 2, "container": "CLUSTER", "function": "clusterCommand", - "deprecated_since": "7.0.0", - "replaced_by": "`CLUSTER SHARDS`", - "doc_flags": [ - "DEPRECATED" - ], "history": [ [ "4.0.0", diff --git a/src/config.c b/src/config.c index 646a5ea639..a609a8f18d 100644 --- a/src/config.c +++ b/src/config.c @@ -124,7 +124,7 @@ configEnum propagation_error_behavior_enum[] = {{"ignore", PROPAGATION_ERR_BEHAV /* Output buffer limits presets. */ clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT] = { {0, 0, 0}, /* normal */ - {1024 * 1024 * 256, 1024 * 1024 * 64, 60}, /* slave */ + {1024 * 1024 * 256, 1024 * 1024 * 64, 60}, /* replica */ {1024 * 1024 * 32, 1024 * 1024 * 8, 60} /* pubsub */ }; @@ -373,7 +373,7 @@ static int updateClientOutputBufferLimit(sds *args, int arg_len, const char **er * error in a single client class is present. */ for (j = 0; j < arg_len; j += 4) { class = getClientTypeByName(args[j]); - if (class == -1 || class == CLIENT_TYPE_MASTER) { + if (class == -1 || class == CLIENT_TYPE_PRIMARY) { if (err) *err = "Invalid client class specified in " "buffer limit configuration."; @@ -574,7 +574,7 @@ void loadServerConfigFromString(char *config) { } /* Sanity checks. */ - if (server.cluster_enabled && server.masterhost) { + if (server.cluster_enabled && server.primary_host) { err = "replicaof directive not allowed in cluster mode"; goto loaderr; } @@ -986,7 +986,6 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state); dictType optionToLineDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor, /* val destructor */ @@ -996,7 +995,6 @@ dictType optionToLineDictType = { dictType optionSetDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -1369,11 +1367,11 @@ void rewriteConfigSaveOption(standardConfig *config, const char *name, struct re if (!server.saveparamslen) { rewriteConfigRewriteLine(state, name, sdsnew("save \"\""), 1); } else { + line = sdsnew(name); for (j = 0; j < server.saveparamslen; j++) { - line = sdscatprintf(sdsempty(), "save %ld %d", (long)server.saveparams[j].seconds, - server.saveparams[j].changes); - rewriteConfigRewriteLine(state, name, line, 1); + line = sdscatprintf(line, " %ld %d", (long)server.saveparams[j].seconds, server.saveparams[j].changes); } + rewriteConfigRewriteLine(state, name, line, 1); } /* Mark "save" as processed in case server.saveparamslen is zero. */ @@ -1424,19 +1422,19 @@ void rewriteConfigDirOption(standardConfig *config, const char *name, struct rew rewriteConfigStringOption(state, name, cwd, NULL); } -/* Rewrite the slaveof option. */ +/* Rewrite the replicaof option. */ void rewriteConfigReplicaOfOption(standardConfig *config, const char *name, struct rewriteConfigState *state) { UNUSED(config); sds line; - /* If this is a master, we want all the slaveof config options + /* If this is a primary, we want all the replicaof config options * in the file to be removed. Note that if this is a cluster instance - * we don't want a slaveof directive inside valkey.conf. 
*/ - if (server.cluster_enabled || server.masterhost == NULL) { + * we don't want a replicaof directive inside valkey.conf. */ + if (server.cluster_enabled || server.primary_host == NULL) { rewriteConfigMarkAsProcessed(state, name); return; } - line = sdscatprintf(sdsempty(), "%s %s %d", name, server.masterhost, server.masterport); + line = sdscatprintf(sdsempty(), "%s %s %d", name, server.primary_host, server.primary_port); rewriteConfigRewriteLine(state, name, line, 1); } @@ -2454,9 +2452,9 @@ static int updateMaxmemory(const char **err) { return 1; } -static int updateGoodSlaves(const char **err) { +static int updateGoodReplicas(const char **err) { UNUSED(err); - refreshGoodSlavesCount(); + refreshGoodReplicasCount(); return 1; } @@ -2790,7 +2788,7 @@ static int setConfigOOMScoreAdjValuesOption(standardConfig *config, sds *argv, i * keep the configuration, which may still be valid for privileged processes. */ - if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_MASTER] || + if (values[CONFIG_OOM_REPLICA] < values[CONFIG_OOM_PRIMARY] || values[CONFIG_OOM_BGCHILD] < values[CONFIG_OOM_REPLICA]) { serverLog(LL_WARNING, "The oom-score-adj-values configuration may not work for non-privileged processes! " "Please consult the documentation."); @@ -2869,18 +2867,18 @@ static int setConfigReplicaOfOption(standardConfig *config, sds *argv, int argc, return 0; } - sdsfree(server.masterhost); - server.masterhost = NULL; + sdsfree(server.primary_host); + server.primary_host = NULL; if (!strcasecmp(argv[0], "no") && !strcasecmp(argv[1], "one")) { return 1; } char *ptr; - server.masterport = strtol(argv[1], &ptr, 10); - if (server.masterport < 0 || server.masterport > 65535 || *ptr != '\0') { + server.primary_port = strtol(argv[1], &ptr, 10); + if (server.primary_port < 0 || server.primary_port > 65535 || *ptr != '\0') { *err = "Invalid master port"; return 0; } - server.masterhost = sdsnew(argv[0]); + server.primary_host = sdsnew(argv[0]); server.repl_state = REPL_STATE_CONNECT; return 1; } @@ -2893,8 +2891,8 @@ static sds getConfigBindOption(standardConfig *config) { static sds getConfigReplicaOfOption(standardConfig *config) { UNUSED(config); char buf[256]; - if (server.masterhost) - snprintf(buf, sizeof(buf), "%s %d", server.masterhost, server.masterport); + if (server.primary_host) + snprintf(buf, sizeof(buf), "%s %d", server.primary_host, server.primary_port); else buf[0] = '\0'; return sdsnew(buf); @@ -3005,1454 +3003,230 @@ static int applyClientMaxMemoryUsage(const char **err) { } standardConfig static_configs[] = { + /* clang-format off */ /* Bool configs */ createBoolConfig("rdbchecksum", NULL, IMMUTABLE_CONFIG, server.rdb_checksum, 1, NULL, NULL), createBoolConfig("daemonize", NULL, IMMUTABLE_CONFIG, server.daemonize, 0, NULL, NULL), - createBoolConfig("io-threads-do-reads", - NULL, - DEBUG_CONFIG | IMMUTABLE_CONFIG, - server.io_threads_do_reads, - 0, - NULL, - NULL), /* Read + parse from threads? */ + createBoolConfig("io-threads-do-reads", NULL, DEBUG_CONFIG | IMMUTABLE_CONFIG, server.io_threads_do_reads, 0, NULL, NULL), /* Read + parse from threads? 
*/ createBoolConfig("always-show-logo", NULL, IMMUTABLE_CONFIG, server.always_show_logo, 0, NULL, NULL), createBoolConfig("protected-mode", NULL, MODIFIABLE_CONFIG, server.protected_mode, 1, NULL, NULL), createBoolConfig("rdbcompression", NULL, MODIFIABLE_CONFIG, server.rdb_compression, 1, NULL, NULL), createBoolConfig("rdb-del-sync-files", NULL, MODIFIABLE_CONFIG, server.rdb_del_sync_files, 0, NULL, NULL), createBoolConfig("activerehashing", NULL, MODIFIABLE_CONFIG, server.activerehashing, 1, NULL, NULL), - createBoolConfig("stop-writes-on-bgsave-error", - NULL, - MODIFIABLE_CONFIG, - server.stop_writes_on_bgsave_err, - 1, - NULL, - NULL), - createBoolConfig("set-proc-title", - NULL, - IMMUTABLE_CONFIG, - server.set_proc_title, - 1, - NULL, - NULL), /* Should setproctitle be used? */ - createBoolConfig("dynamic-hz", NULL, MODIFIABLE_CONFIG, server.dynamic_hz, 1, NULL, NULL), /* Adapt hz to # of - clients.*/ - createBoolConfig("lazyfree-lazy-eviction", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_eviction, - 0, - NULL, - NULL), - createBoolConfig("lazyfree-lazy-expire", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_expire, - 0, - NULL, - NULL), - createBoolConfig("lazyfree-lazy-server-del", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_server_del, - 0, - NULL, - NULL), - createBoolConfig("lazyfree-lazy-user-del", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_user_del, - 0, - NULL, - NULL), - createBoolConfig("lazyfree-lazy-user-flush", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.lazyfree_lazy_user_flush, - 0, - NULL, - NULL), + createBoolConfig("stop-writes-on-bgsave-error", NULL, MODIFIABLE_CONFIG, server.stop_writes_on_bgsave_err, 1, NULL, NULL), + createBoolConfig("set-proc-title", NULL, IMMUTABLE_CONFIG, server.set_proc_title, 1, NULL, NULL), /* Should setproctitle be used? 
*/ + createBoolConfig("dynamic-hz", NULL, MODIFIABLE_CONFIG, server.dynamic_hz, 1, NULL, NULL), /* Adapt hz to # of clients.*/ + createBoolConfig("lazyfree-lazy-eviction", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_eviction, 0, NULL, NULL), + createBoolConfig("lazyfree-lazy-expire", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_expire, 0, NULL, NULL), + createBoolConfig("lazyfree-lazy-server-del", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_server_del, 0, NULL, NULL), + createBoolConfig("lazyfree-lazy-user-del", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_user_del, 0, NULL, NULL), + createBoolConfig("lazyfree-lazy-user-flush", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.lazyfree_lazy_user_flush, 0, NULL, NULL), createBoolConfig("repl-disable-tcp-nodelay", NULL, MODIFIABLE_CONFIG, server.repl_disable_tcp_nodelay, 0, NULL, NULL), - createBoolConfig("repl-diskless-sync", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.repl_diskless_sync, - 1, - NULL, - NULL), - createBoolConfig("aof-rewrite-incremental-fsync", - NULL, - MODIFIABLE_CONFIG, - server.aof_rewrite_incremental_fsync, - 1, - NULL, - NULL), + createBoolConfig("repl-diskless-sync", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.repl_diskless_sync, 1, NULL, NULL), + createBoolConfig("aof-rewrite-incremental-fsync", NULL, MODIFIABLE_CONFIG, server.aof_rewrite_incremental_fsync, 1, NULL, NULL), createBoolConfig("no-appendfsync-on-rewrite", NULL, MODIFIABLE_CONFIG, server.aof_no_fsync_on_rewrite, 0, NULL, NULL), - createBoolConfig("cluster-require-full-coverage", - NULL, - MODIFIABLE_CONFIG, - server.cluster_require_full_coverage, - 1, - NULL, - NULL), - createBoolConfig("rdb-save-incremental-fsync", - NULL, - MODIFIABLE_CONFIG, - server.rdb_save_incremental_fsync, - 1, - NULL, - NULL), + createBoolConfig("cluster-require-full-coverage", NULL, MODIFIABLE_CONFIG, server.cluster_require_full_coverage, 1, NULL, NULL), + createBoolConfig("rdb-save-incremental-fsync", NULL, MODIFIABLE_CONFIG, server.rdb_save_incremental_fsync, 1, NULL, NULL), createBoolConfig("aof-load-truncated", NULL, MODIFIABLE_CONFIG, server.aof_load_truncated, 1, NULL, NULL), createBoolConfig("aof-use-rdb-preamble", NULL, MODIFIABLE_CONFIG, server.aof_use_rdb_preamble, 1, NULL, NULL), createBoolConfig("aof-timestamp-enabled", NULL, MODIFIABLE_CONFIG, server.aof_timestamp_enabled, 0, NULL, NULL), - createBoolConfig("cluster-replica-no-failover", - "cluster-slave-no-failover", - MODIFIABLE_CONFIG, - server.cluster_slave_no_failover, - 0, - NULL, - updateClusterFlags), /* Failover by default. 
*/ - createBoolConfig("replica-lazy-flush", - "slave-lazy-flush", - MODIFIABLE_CONFIG, - server.repl_slave_lazy_flush, - 0, - NULL, - NULL), - createBoolConfig("replica-serve-stale-data", - "slave-serve-stale-data", - MODIFIABLE_CONFIG, - server.repl_serve_stale_data, - 1, - NULL, - NULL), - createBoolConfig("replica-read-only", - "slave-read-only", - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.repl_slave_ro, - 1, - NULL, - NULL), - createBoolConfig("replica-ignore-maxmemory", - "slave-ignore-maxmemory", - MODIFIABLE_CONFIG, - server.repl_slave_ignore_maxmemory, - 1, - NULL, - NULL), - createBoolConfig("jemalloc-bg-thread", - NULL, - MODIFIABLE_CONFIG, - server.jemalloc_bg_thread, - 1, - NULL, - updateJemallocBgThread), - createBoolConfig("activedefrag", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - server.active_defrag_enabled, - 0, - isValidActiveDefrag, - NULL), + createBoolConfig("cluster-replica-no-failover", "cluster-slave-no-failover", MODIFIABLE_CONFIG, server.cluster_replica_no_failover, 0, NULL, updateClusterFlags), /* Failover by default. */ + createBoolConfig("replica-lazy-flush", "slave-lazy-flush", MODIFIABLE_CONFIG, server.repl_replica_lazy_flush, 0, NULL, NULL), + createBoolConfig("replica-serve-stale-data", "slave-serve-stale-data", MODIFIABLE_CONFIG, server.repl_serve_stale_data, 1, NULL, NULL), + createBoolConfig("replica-read-only", "slave-read-only", DEBUG_CONFIG | MODIFIABLE_CONFIG, server.repl_replica_ro, 1, NULL, NULL), + createBoolConfig("replica-ignore-maxmemory", "slave-ignore-maxmemory", MODIFIABLE_CONFIG, server.repl_replica_ignore_maxmemory, 1, NULL, NULL), + createBoolConfig("jemalloc-bg-thread", NULL, MODIFIABLE_CONFIG, server.jemalloc_bg_thread, 1, NULL, updateJemallocBgThread), + createBoolConfig("activedefrag", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, server.active_defrag_enabled, 0, isValidActiveDefrag, NULL), createBoolConfig("syslog-enabled", NULL, IMMUTABLE_CONFIG, server.syslog_enabled, 0, NULL, NULL), createBoolConfig("cluster-enabled", NULL, IMMUTABLE_CONFIG, server.cluster_enabled, 0, NULL, NULL), - createBoolConfig("appendonly", - NULL, - MODIFIABLE_CONFIG | DENY_LOADING_CONFIG, - server.aof_enabled, - 0, - NULL, - updateAppendonly), - createBoolConfig("cluster-allow-reads-when-down", - NULL, - MODIFIABLE_CONFIG, - server.cluster_allow_reads_when_down, - 0, - NULL, - NULL), - createBoolConfig("cluster-allow-pubsubshard-when-down", - NULL, - MODIFIABLE_CONFIG, - server.cluster_allow_pubsubshard_when_down, - 1, - NULL, - NULL), - createBoolConfig("crash-log-enabled", - NULL, - MODIFIABLE_CONFIG, - server.crashlog_enabled, - 1, - NULL, - updateSighandlerEnabled), + createBoolConfig("appendonly", NULL, MODIFIABLE_CONFIG | DENY_LOADING_CONFIG, server.aof_enabled, 0, NULL, updateAppendonly), + createBoolConfig("cluster-allow-reads-when-down", NULL, MODIFIABLE_CONFIG, server.cluster_allow_reads_when_down, 0, NULL, NULL), + createBoolConfig("cluster-allow-pubsubshard-when-down", NULL, MODIFIABLE_CONFIG, server.cluster_allow_pubsubshard_when_down, 1, NULL, NULL), + createBoolConfig("crash-log-enabled", NULL, MODIFIABLE_CONFIG, server.crashlog_enabled, 1, NULL, updateSighandlerEnabled), createBoolConfig("crash-memcheck-enabled", NULL, MODIFIABLE_CONFIG, server.memcheck_enabled, 1, NULL, NULL), - createBoolConfig("use-exit-on-panic", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - server.use_exit_on_panic, - 0, - NULL, - NULL), + createBoolConfig("use-exit-on-panic", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.use_exit_on_panic, 0, NULL, NULL), 
createBoolConfig("disable-thp", NULL, IMMUTABLE_CONFIG, server.disable_thp, 1, NULL, NULL), - createBoolConfig("cluster-allow-replica-migration", - NULL, - MODIFIABLE_CONFIG, - server.cluster_allow_replica_migration, - 1, - NULL, - NULL), + createBoolConfig("cluster-allow-replica-migration", NULL, MODIFIABLE_CONFIG, server.cluster_allow_replica_migration, 1, NULL, NULL), createBoolConfig("replica-announced", NULL, MODIFIABLE_CONFIG, server.replica_announced, 1, NULL, NULL), createBoolConfig("latency-tracking", NULL, MODIFIABLE_CONFIG, server.latency_tracking_enabled, 1, NULL, NULL), - createBoolConfig("aof-disable-auto-gc", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - server.aof_disable_auto_gc, - 0, - NULL, - updateAofAutoGCEnabled), - createBoolConfig("replica-ignore-disk-write-errors", - NULL, - MODIFIABLE_CONFIG, - server.repl_ignore_disk_write_error, - 0, - NULL, - NULL), - createBoolConfig("extended-redis-compatibility", - NULL, - MODIFIABLE_CONFIG, - server.extended_redis_compat, - 0, - NULL, - updateExtendedRedisCompat), + createBoolConfig("aof-disable-auto-gc", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.aof_disable_auto_gc, 0, NULL, updateAofAutoGCEnabled), + createBoolConfig("replica-ignore-disk-write-errors", NULL, MODIFIABLE_CONFIG, server.repl_ignore_disk_write_error, 0, NULL, NULL), + createBoolConfig("extended-redis-compatibility", NULL, MODIFIABLE_CONFIG, server.extended_redis_compat, 0, NULL, updateExtendedRedisCompat), + createBoolConfig("enable-debug-assert", NULL, IMMUTABLE_CONFIG | HIDDEN_CONFIG, server.enable_debug_assert, 0, NULL, NULL), /* String Configs */ createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL), createStringConfig("unixsocket", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocket, NULL, NULL, NULL), createStringConfig("pidfile", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.pidfile, NULL, NULL, NULL), - createStringConfig("replica-announce-ip", - "slave-announce-ip", - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.slave_announce_ip, - NULL, - NULL, - NULL), - createStringConfig("masteruser", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.masteruser, - NULL, - NULL, - NULL), - createStringConfig("cluster-announce-ip", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.cluster_announce_ip, - NULL, - NULL, - updateClusterIp), - createStringConfig("cluster-config-file", - NULL, - IMMUTABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.cluster_configfile, - "nodes.conf", - NULL, - NULL), - createStringConfig("cluster-announce-hostname", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.cluster_announce_hostname, - NULL, - isValidAnnouncedHostname, - updateClusterHostname), - createStringConfig("cluster-announce-human-nodename", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.cluster_announce_human_nodename, - NULL, - isValidAnnouncedNodename, - updateClusterHumanNodename), - createStringConfig("syslog-ident", - NULL, - IMMUTABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.syslog_ident, - SERVER_NAME, - NULL, - NULL), - createStringConfig("dbfilename", - NULL, - MODIFIABLE_CONFIG | PROTECTED_CONFIG, - ALLOW_EMPTY_STRING, - server.rdb_filename, - "dump.rdb", - isValidDBfilename, - NULL), - createStringConfig("appendfilename", - NULL, - IMMUTABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.aof_filename, - "appendonly.aof", - isValidAOFfilename, - NULL), - createStringConfig("appenddirname", - NULL, - IMMUTABLE_CONFIG, 
- ALLOW_EMPTY_STRING, - server.aof_dirname, - "appendonlydir", - isValidAOFdirname, - NULL), - createStringConfig("server-cpulist", - "server_cpulist", - IMMUTABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.server_cpulist, - NULL, - NULL, - NULL), - createStringConfig("bio-cpulist", - "bio_cpulist", - IMMUTABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.bio_cpulist, - NULL, - NULL, - NULL), - createStringConfig("aof-rewrite-cpulist", - "aof_rewrite_cpulist", - IMMUTABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.aof_rewrite_cpulist, - NULL, - NULL, - NULL), - createStringConfig("bgsave-cpulist", - "bgsave_cpulist", - IMMUTABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.bgsave_cpulist, - NULL, - NULL, - NULL), - createStringConfig("ignore-warnings", - NULL, - MODIFIABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.ignore_warnings, - "", - NULL, - NULL), - createStringConfig("proc-title-template", - NULL, - MODIFIABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.proc_title_template, - CONFIG_DEFAULT_PROC_TITLE_TEMPLATE, - isValidProcTitleTemplate, - updateProcTitleTemplate), - createStringConfig("bind-source-addr", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.bind_source_addr, - NULL, - NULL, - NULL), + createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.replica_announce_ip, NULL, NULL, NULL), + createStringConfig("primaryuser", "masteruser", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_user, NULL, NULL, NULL), + createStringConfig("cluster-announce-ip", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_ip, NULL, NULL, updateClusterIp), + createStringConfig("cluster-config-file", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.cluster_configfile, "nodes.conf", NULL, NULL), + createStringConfig("cluster-announce-hostname", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_hostname, NULL, isValidAnnouncedHostname, updateClusterHostname), + createStringConfig("cluster-announce-human-nodename", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.cluster_announce_human_nodename, NULL, isValidAnnouncedNodename, updateClusterHumanNodename), + createStringConfig("syslog-ident", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.syslog_ident, SERVER_NAME, NULL, NULL), + createStringConfig("dbfilename", NULL, MODIFIABLE_CONFIG | PROTECTED_CONFIG, ALLOW_EMPTY_STRING, server.rdb_filename, "dump.rdb", isValidDBfilename, NULL), + createStringConfig("appendfilename", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.aof_filename, "appendonly.aof", isValidAOFfilename, NULL), + createStringConfig("appenddirname", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.aof_dirname, "appendonlydir", isValidAOFdirname, NULL), + createStringConfig("server-cpulist", "server_cpulist", IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.server_cpulist, NULL, NULL, NULL), + createStringConfig("bio-cpulist", "bio_cpulist", IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bio_cpulist, NULL, NULL, NULL), + createStringConfig("aof-rewrite-cpulist", "aof_rewrite_cpulist", IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.aof_rewrite_cpulist, NULL, NULL, NULL), + createStringConfig("bgsave-cpulist", "bgsave_cpulist", IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bgsave_cpulist, NULL, NULL, NULL), + createStringConfig("ignore-warnings", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.ignore_warnings, "", NULL, NULL), + createStringConfig("proc-title-template", NULL, MODIFIABLE_CONFIG, 
ALLOW_EMPTY_STRING, server.proc_title_template, CONFIG_DEFAULT_PROC_TITLE_TEMPLATE, isValidProcTitleTemplate, updateProcTitleTemplate), + createStringConfig("bind-source-addr", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.bind_source_addr, NULL, NULL, NULL), createStringConfig("logfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.logfile, "", NULL, NULL), #ifdef LOG_REQ_RES - createStringConfig("req-res-logfile", - NULL, - IMMUTABLE_CONFIG | HIDDEN_CONFIG, - EMPTY_STRING_IS_NULL, - server.req_res_logfile, - NULL, - NULL, - NULL), + createStringConfig("req-res-logfile", NULL, IMMUTABLE_CONFIG | HIDDEN_CONFIG, EMPTY_STRING_IS_NULL, server.req_res_logfile, NULL, NULL, NULL), #endif - createStringConfig("locale-collate", - NULL, - MODIFIABLE_CONFIG, - ALLOW_EMPTY_STRING, - server.locale_collate, - "", - NULL, - updateLocaleCollate), + createStringConfig("locale-collate", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.locale_collate, "", NULL, updateLocaleCollate), /* SDS Configs */ - createSDSConfig("masterauth", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.masterauth, - NULL, - NULL, - NULL), - createSDSConfig("requirepass", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.requirepass, - NULL, - NULL, - updateRequirePass), + createSDSConfig("primaryauth", "masterauth", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_auth, NULL, NULL, NULL), + createSDSConfig("requirepass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.requirepass, NULL, NULL, updateRequirePass), /* Enum Configs */ - createEnumConfig("supervised", - NULL, - IMMUTABLE_CONFIG, - supervised_mode_enum, - server.supervised_mode, - SUPERVISED_NONE, - NULL, - NULL), - createEnumConfig("syslog-facility", - NULL, - IMMUTABLE_CONFIG, - syslog_facility_enum, - server.syslog_facility, - LOG_LOCAL0, - NULL, - NULL), - createEnumConfig("repl-diskless-load", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG | DENY_LOADING_CONFIG, - repl_diskless_load_enum, - server.repl_diskless_load, - REPL_DISKLESS_LOAD_DISABLED, - NULL, - NULL), + createEnumConfig("supervised", NULL, IMMUTABLE_CONFIG, supervised_mode_enum, server.supervised_mode, SUPERVISED_NONE, NULL, NULL), + createEnumConfig("syslog-facility", NULL, IMMUTABLE_CONFIG, syslog_facility_enum, server.syslog_facility, LOG_LOCAL0, NULL, NULL), + createEnumConfig("repl-diskless-load", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG | DENY_LOADING_CONFIG, repl_diskless_load_enum, server.repl_diskless_load, REPL_DISKLESS_LOAD_DISABLED, NULL, NULL), createEnumConfig("loglevel", NULL, MODIFIABLE_CONFIG, loglevel_enum, server.verbosity, LL_NOTICE, NULL, NULL), - createEnumConfig("maxmemory-policy", - NULL, - MODIFIABLE_CONFIG, - maxmemory_policy_enum, - server.maxmemory_policy, - MAXMEMORY_NO_EVICTION, - NULL, - NULL), - createEnumConfig("appendfsync", - NULL, - MODIFIABLE_CONFIG, - aof_fsync_enum, - server.aof_fsync, - AOF_FSYNC_EVERYSEC, - NULL, - updateAppendFsync), - createEnumConfig("oom-score-adj", - NULL, - MODIFIABLE_CONFIG, - oom_score_adj_enum, - server.oom_score_adj, - OOM_SCORE_ADJ_NO, - NULL, - updateOOMScoreAdj), - createEnumConfig("acl-pubsub-default", - NULL, - MODIFIABLE_CONFIG, - acl_pubsub_default_enum, - server.acl_pubsub_default, - 0, - NULL, - NULL), - createEnumConfig("sanitize-dump-payload", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - sanitize_dump_payload_enum, - server.sanitize_dump_payload, - SANITIZE_DUMP_NO, - NULL, - NULL), - 
createEnumConfig("enable-protected-configs", - NULL, - IMMUTABLE_CONFIG, - protected_action_enum, - server.enable_protected_configs, - PROTECTED_ACTION_ALLOWED_NO, - NULL, - NULL), - createEnumConfig("enable-debug-command", - NULL, - IMMUTABLE_CONFIG, - protected_action_enum, - server.enable_debug_cmd, - PROTECTED_ACTION_ALLOWED_NO, - NULL, - NULL), - createEnumConfig("enable-module-command", - NULL, - IMMUTABLE_CONFIG, - protected_action_enum, - server.enable_module_cmd, - PROTECTED_ACTION_ALLOWED_NO, - NULL, - NULL), - createEnumConfig("cluster-preferred-endpoint-type", - NULL, - MODIFIABLE_CONFIG, - cluster_preferred_endpoint_type_enum, - server.cluster_preferred_endpoint_type, - CLUSTER_ENDPOINT_TYPE_IP, - NULL, - invalidateClusterSlotsResp), - createEnumConfig("propagation-error-behavior", - NULL, - MODIFIABLE_CONFIG, - propagation_error_behavior_enum, - server.propagation_error_behavior, - PROPAGATION_ERR_BEHAVIOR_IGNORE, - NULL, - NULL), - createEnumConfig("shutdown-on-sigint", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - shutdown_on_sig_enum, - server.shutdown_on_sigint, - 0, - isValidShutdownOnSigFlags, - NULL), - createEnumConfig("shutdown-on-sigterm", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - shutdown_on_sig_enum, - server.shutdown_on_sigterm, - 0, - isValidShutdownOnSigFlags, - NULL), + createEnumConfig("maxmemory-policy", NULL, MODIFIABLE_CONFIG, maxmemory_policy_enum, server.maxmemory_policy, MAXMEMORY_NO_EVICTION, NULL, NULL), + createEnumConfig("appendfsync", NULL, MODIFIABLE_CONFIG, aof_fsync_enum, server.aof_fsync, AOF_FSYNC_EVERYSEC, NULL, updateAppendFsync), + createEnumConfig("oom-score-adj", NULL, MODIFIABLE_CONFIG, oom_score_adj_enum, server.oom_score_adj, OOM_SCORE_ADJ_NO, NULL, updateOOMScoreAdj), + createEnumConfig("acl-pubsub-default", NULL, MODIFIABLE_CONFIG, acl_pubsub_default_enum, server.acl_pubsub_default, 0, NULL, NULL), + createEnumConfig("sanitize-dump-payload", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, sanitize_dump_payload_enum, server.sanitize_dump_payload, SANITIZE_DUMP_NO, NULL, NULL), + createEnumConfig("enable-protected-configs", NULL, IMMUTABLE_CONFIG, protected_action_enum, server.enable_protected_configs, PROTECTED_ACTION_ALLOWED_NO, NULL, NULL), + createEnumConfig("enable-debug-command", NULL, IMMUTABLE_CONFIG, protected_action_enum, server.enable_debug_cmd, PROTECTED_ACTION_ALLOWED_NO, NULL, NULL), + createEnumConfig("enable-module-command", NULL, IMMUTABLE_CONFIG, protected_action_enum, server.enable_module_cmd, PROTECTED_ACTION_ALLOWED_NO, NULL, NULL), + createEnumConfig("cluster-preferred-endpoint-type", NULL, MODIFIABLE_CONFIG, cluster_preferred_endpoint_type_enum, server.cluster_preferred_endpoint_type, CLUSTER_ENDPOINT_TYPE_IP, NULL, invalidateClusterSlotsResp), + createEnumConfig("propagation-error-behavior", NULL, MODIFIABLE_CONFIG, propagation_error_behavior_enum, server.propagation_error_behavior, PROPAGATION_ERR_BEHAVIOR_IGNORE, NULL, NULL), + createEnumConfig("shutdown-on-sigint", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, shutdown_on_sig_enum, server.shutdown_on_sigint, 0, isValidShutdownOnSigFlags, NULL), + createEnumConfig("shutdown-on-sigterm", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, shutdown_on_sig_enum, server.shutdown_on_sigterm, 0, isValidShutdownOnSigFlags, NULL), /* Integer configs */ createIntConfig("databases", NULL, IMMUTABLE_CONFIG, 1, INT_MAX, server.dbnum, 16, INTEGER_CONFIG, NULL, NULL), - createIntConfig("port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.port, - 6379, - INTEGER_CONFIG, - 
NULL, - updatePort), /* TCP port. */ - createIntConfig("io-threads", - NULL, - DEBUG_CONFIG | IMMUTABLE_CONFIG, - 1, - 128, - server.io_threads_num, - 1, - INTEGER_CONFIG, - NULL, - NULL), /* Single threaded by default */ - createIntConfig("auto-aof-rewrite-percentage", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.aof_rewrite_perc, - 100, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("cluster-replica-validity-factor", - "cluster-slave-validity-factor", - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.cluster_slave_validity_factor, - 10, - INTEGER_CONFIG, - NULL, - NULL), /* Slave max data age factor. */ - createIntConfig("list-max-listpack-size", - "list-max-ziplist-size", - MODIFIABLE_CONFIG, - INT_MIN, - INT_MAX, - server.list_max_listpack_size, - -2, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("tcp-keepalive", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.tcpkeepalive, - 300, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("cluster-migration-barrier", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.cluster_migration_barrier, - 1, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("active-defrag-cycle-min", - NULL, - MODIFIABLE_CONFIG, - 1, - 99, - server.active_defrag_cycle_min, - 1, - INTEGER_CONFIG, - NULL, - updateDefragConfiguration), /* Default: 1% CPU min (at lower threshold) */ - createIntConfig("active-defrag-cycle-max", - NULL, - MODIFIABLE_CONFIG, - 1, - 99, - server.active_defrag_cycle_max, - 25, - INTEGER_CONFIG, - NULL, - updateDefragConfiguration), /* Default: 25% CPU max (at upper threshold) */ - createIntConfig("active-defrag-threshold-lower", - NULL, - MODIFIABLE_CONFIG, - 0, - 1000, - server.active_defrag_threshold_lower, - 10, - INTEGER_CONFIG, - NULL, - NULL), /* Default: don't defrag when fragmentation is below 10% */ - createIntConfig("active-defrag-threshold-upper", - NULL, - MODIFIABLE_CONFIG, - 0, - 1000, - server.active_defrag_threshold_upper, - 100, - INTEGER_CONFIG, - NULL, - updateDefragConfiguration), /* Default: maximum defrag force at 100% fragmentation */ - createIntConfig("lfu-log-factor", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.lfu_log_factor, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("lfu-decay-time", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.lfu_decay_time, - 1, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("replica-priority", - "slave-priority", - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.slave_priority, - 100, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("repl-diskless-sync-delay", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.repl_diskless_sync_delay, - 5, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("maxmemory-samples", - NULL, - MODIFIABLE_CONFIG, - 1, - 64, - server.maxmemory_samples, - 5, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("maxmemory-eviction-tenacity", - NULL, - MODIFIABLE_CONFIG, - 0, - 100, - server.maxmemory_eviction_tenacity, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("timeout", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.maxidletime, - 0, - INTEGER_CONFIG, - NULL, - NULL), /* Default client timeout: infinite */ - createIntConfig("replica-announce-port", - "slave-announce-port", - MODIFIABLE_CONFIG, - 0, - 65535, - server.slave_announce_port, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("tcp-backlog", - NULL, - IMMUTABLE_CONFIG, - 0, - INT_MAX, - server.tcp_backlog, - 511, - INTEGER_CONFIG, - NULL, - NULL), /* TCP listen backlog. 
*/ + createIntConfig("port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.port, 6379, INTEGER_CONFIG, NULL, updatePort), /* TCP port. */ + createIntConfig("io-threads", NULL, DEBUG_CONFIG | IMMUTABLE_CONFIG, 1, 128, server.io_threads_num, 1, INTEGER_CONFIG, NULL, NULL), /* Single threaded by default */ + createIntConfig("auto-aof-rewrite-percentage", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.aof_rewrite_perc, 100, INTEGER_CONFIG, NULL, NULL), + createIntConfig("cluster-replica-validity-factor", "cluster-slave-validity-factor", MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_replica_validity_factor, 10, INTEGER_CONFIG, NULL, NULL), /* replica max data age factor. */ + createIntConfig("list-max-listpack-size", "list-max-ziplist-size", MODIFIABLE_CONFIG, INT_MIN, INT_MAX, server.list_max_listpack_size, -2, INTEGER_CONFIG, NULL, NULL), + createIntConfig("tcp-keepalive", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tcpkeepalive, 300, INTEGER_CONFIG, NULL, NULL), + createIntConfig("cluster-migration-barrier", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.cluster_migration_barrier, 1, INTEGER_CONFIG, NULL, NULL), + createIntConfig("active-defrag-cycle-min", NULL, MODIFIABLE_CONFIG, 1, 99, server.active_defrag_cycle_min, 1, INTEGER_CONFIG, NULL, updateDefragConfiguration), /* Default: 1% CPU min (at lower threshold) */ + createIntConfig("active-defrag-cycle-max", NULL, MODIFIABLE_CONFIG, 1, 99, server.active_defrag_cycle_max, 25, INTEGER_CONFIG, NULL, updateDefragConfiguration), /* Default: 25% CPU max (at upper threshold) */ + createIntConfig("active-defrag-threshold-lower", NULL, MODIFIABLE_CONFIG, 0, 1000, server.active_defrag_threshold_lower, 10, INTEGER_CONFIG, NULL, NULL), /* Default: don't defrag when fragmentation is below 10% */ + createIntConfig("active-defrag-threshold-upper", NULL, MODIFIABLE_CONFIG, 0, 1000, server.active_defrag_threshold_upper, 100, INTEGER_CONFIG, NULL, updateDefragConfiguration), /* Default: maximum defrag force at 100% fragmentation */ + createIntConfig("lfu-log-factor", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_log_factor, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("lfu-decay-time", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.lfu_decay_time, 1, INTEGER_CONFIG, NULL, NULL), + createIntConfig("replica-priority", "slave-priority", MODIFIABLE_CONFIG, 0, INT_MAX, server.replica_priority, 100, INTEGER_CONFIG, NULL, NULL), + createIntConfig("repl-diskless-sync-delay", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_delay, 5, INTEGER_CONFIG, NULL, NULL), + createIntConfig("maxmemory-samples", NULL, MODIFIABLE_CONFIG, 1, 64, server.maxmemory_samples, 5, INTEGER_CONFIG, NULL, NULL), + createIntConfig("maxmemory-eviction-tenacity", NULL, MODIFIABLE_CONFIG, 0, 100, server.maxmemory_eviction_tenacity, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.maxidletime, 0, INTEGER_CONFIG, NULL, NULL), /* Default client timeout: infinite */ + createIntConfig("replica-announce-port", "slave-announce-port", MODIFIABLE_CONFIG, 0, 65535, server.replica_announce_port, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("tcp-backlog", NULL, IMMUTABLE_CONFIG, 0, INT_MAX, server.tcp_backlog, 511, INTEGER_CONFIG, NULL, NULL), /* TCP listen backlog. 
*/ createIntConfig("cluster-port", NULL, IMMUTABLE_CONFIG, 0, 65535, server.cluster_port, 0, INTEGER_CONFIG, NULL, NULL), - createIntConfig("cluster-announce-bus-port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.cluster_announce_bus_port, - 0, - INTEGER_CONFIG, - NULL, - updateClusterAnnouncedPort), /* Default: Use +10000 offset. */ - createIntConfig("cluster-announce-port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.cluster_announce_port, - 0, - INTEGER_CONFIG, - NULL, - updateClusterAnnouncedPort), /* Use server.port */ - createIntConfig("cluster-announce-tls-port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.cluster_announce_tls_port, - 0, - INTEGER_CONFIG, - NULL, - updateClusterAnnouncedPort), /* Use server.tls_port */ - createIntConfig("repl-timeout", - NULL, - MODIFIABLE_CONFIG, - 1, - INT_MAX, - server.repl_timeout, - 60, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("repl-ping-replica-period", - "repl-ping-slave-period", - MODIFIABLE_CONFIG, - 1, - INT_MAX, - server.repl_ping_slave_period, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("list-compress-depth", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.list_compress_depth, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("rdb-key-save-delay", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - INT_MIN, - INT_MAX, - server.rdb_key_save_delay, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("key-load-delay", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - INT_MIN, - INT_MAX, - server.key_load_delay, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("active-expire-effort", - NULL, - MODIFIABLE_CONFIG, - 1, - 10, - server.active_expire_effort, - 1, - INTEGER_CONFIG, - NULL, - NULL), /* From 1 to 10. */ - createIntConfig("hz", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.config_hz, - CONFIG_DEFAULT_HZ, - INTEGER_CONFIG, - NULL, - updateHZ), - createIntConfig("min-replicas-to-write", - "min-slaves-to-write", - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.repl_min_slaves_to_write, - 0, - INTEGER_CONFIG, - NULL, - updateGoodSlaves), - createIntConfig("min-replicas-max-lag", - "min-slaves-max-lag", - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.repl_min_slaves_max_lag, - 10, - INTEGER_CONFIG, - NULL, - updateGoodSlaves), - createIntConfig("watchdog-period", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - 0, - INT_MAX, - server.watchdog_period, - 0, - INTEGER_CONFIG, - NULL, - updateWatchdogPeriod), - createIntConfig("shutdown-timeout", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.shutdown_timeout, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createIntConfig("repl-diskless-sync-max-replicas", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.repl_diskless_sync_max_replicas, - 0, - INTEGER_CONFIG, - NULL, - NULL), + createIntConfig("cluster-announce-bus-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_bus_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Default: Use +10000 offset. 
*/ + createIntConfig("cluster-announce-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.port */ + createIntConfig("cluster-announce-tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.cluster_announce_tls_port, 0, INTEGER_CONFIG, NULL, updateClusterAnnouncedPort), /* Use server.tls_port */ + createIntConfig("repl-timeout", NULL, MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_timeout, 60, INTEGER_CONFIG, NULL, NULL), + createIntConfig("repl-ping-replica-period", "repl-ping-slave-period", MODIFIABLE_CONFIG, 1, INT_MAX, server.repl_ping_replica_period, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("list-compress-depth", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 0, INT_MAX, server.list_compress_depth, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("rdb-key-save-delay", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, INT_MIN, INT_MAX, server.rdb_key_save_delay, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("key-load-delay", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, INT_MIN, INT_MAX, server.key_load_delay, 0, INTEGER_CONFIG, NULL, NULL), + createIntConfig("active-expire-effort", NULL, MODIFIABLE_CONFIG, 1, 10, server.active_expire_effort, 1, INTEGER_CONFIG, NULL, NULL), /* From 1 to 10. */ + createIntConfig("hz", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.config_hz, CONFIG_DEFAULT_HZ, INTEGER_CONFIG, NULL, updateHZ), + createIntConfig("min-replicas-to-write", "min-slaves-to-write", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_replicas_to_write, 0, INTEGER_CONFIG, NULL, updateGoodReplicas), + createIntConfig("min-replicas-max-lag", "min-slaves-max-lag", MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_min_replicas_max_lag, 10, INTEGER_CONFIG, NULL, updateGoodReplicas), + createIntConfig("watchdog-period", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 0, INT_MAX, server.watchdog_period, 0, INTEGER_CONFIG, NULL, updateWatchdogPeriod), + createIntConfig("shutdown-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.shutdown_timeout, 10, INTEGER_CONFIG, NULL, NULL), + createIntConfig("repl-diskless-sync-max-replicas", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_max_replicas, 0, INTEGER_CONFIG, NULL, NULL), /* Unsigned int configs */ - createUIntConfig("maxclients", - NULL, - MODIFIABLE_CONFIG, - 1, - UINT_MAX, - server.maxclients, - 10000, - INTEGER_CONFIG, - NULL, - updateMaxclients), - createUIntConfig("unixsocketperm", - NULL, - IMMUTABLE_CONFIG, - 0, - 0777, - server.unixsocketperm, - 0, - OCTAL_CONFIG, - NULL, - NULL), - createUIntConfig("socket-mark-id", - NULL, - IMMUTABLE_CONFIG, - 0, - UINT_MAX, - server.socket_mark_id, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createUIntConfig("max-new-connections-per-cycle", - NULL, - MODIFIABLE_CONFIG, - 1, - 1000, - server.max_new_conns_per_cycle, - 10, - INTEGER_CONFIG, - NULL, - NULL), - createUIntConfig("max-new-tls-connections-per-cycle", - NULL, - MODIFIABLE_CONFIG, - 1, - 1000, - server.max_new_tls_conns_per_cycle, - 1, - INTEGER_CONFIG, - NULL, - NULL), + createUIntConfig("maxclients", NULL, MODIFIABLE_CONFIG, 1, UINT_MAX, server.maxclients, 10000, INTEGER_CONFIG, NULL, updateMaxclients), + createUIntConfig("unixsocketperm", NULL, IMMUTABLE_CONFIG, 0, 0777, server.unixsocketperm, 0, OCTAL_CONFIG, NULL, NULL), + createUIntConfig("socket-mark-id", NULL, IMMUTABLE_CONFIG, 0, UINT_MAX, server.socket_mark_id, 0, INTEGER_CONFIG, NULL, NULL), + createUIntConfig("max-new-connections-per-cycle", NULL, MODIFIABLE_CONFIG, 1, 1000, 
server.max_new_conns_per_cycle, 10, INTEGER_CONFIG, NULL, NULL), + createUIntConfig("max-new-tls-connections-per-cycle", NULL, MODIFIABLE_CONFIG, 1, 1000, server.max_new_tls_conns_per_cycle, 1, INTEGER_CONFIG, NULL, NULL), #ifdef LOG_REQ_RES - createUIntConfig("client-default-resp", - NULL, - IMMUTABLE_CONFIG | HIDDEN_CONFIG, - 2, - 3, - server.client_default_resp, - 2, - INTEGER_CONFIG, - NULL, - NULL), + createUIntConfig("client-default-resp", NULL, IMMUTABLE_CONFIG | HIDDEN_CONFIG, 2, 3, server.client_default_resp, 2, INTEGER_CONFIG, NULL, NULL), #endif /* Unsigned Long configs */ - createULongConfig("active-defrag-max-scan-fields", - NULL, - MODIFIABLE_CONFIG, - 1, - LONG_MAX, - server.active_defrag_max_scan_fields, - 1000, - INTEGER_CONFIG, - NULL, - NULL), /* Default: keys with more than 1000 fields will be processed separately */ - createULongConfig("slowlog-max-len", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.slowlog_max_len, - 128, - INTEGER_CONFIG, - NULL, - NULL), - createULongConfig("acllog-max-len", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.acllog_max_len, - 128, - INTEGER_CONFIG, - NULL, - NULL), + createULongConfig("active-defrag-max-scan-fields", NULL, MODIFIABLE_CONFIG, 1, LONG_MAX, server.active_defrag_max_scan_fields, 1000, INTEGER_CONFIG, NULL, NULL), /* Default: keys with more than 1000 fields will be processed separately */ + createULongConfig("slowlog-max-len", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.slowlog_max_len, 128, INTEGER_CONFIG, NULL, NULL), + createULongConfig("acllog-max-len", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.acllog_max_len, 128, INTEGER_CONFIG, NULL, NULL), /* Long Long configs */ - createLongLongConfig("busy-reply-threshold", - "lua-time-limit", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.busy_reply_threshold, - 5000, - INTEGER_CONFIG, - NULL, - NULL), /* milliseconds */ - createLongLongConfig("cluster-node-timeout", - NULL, - MODIFIABLE_CONFIG, - 0, - LLONG_MAX, - server.cluster_node_timeout, - 15000, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("cluster-ping-interval", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - 0, - LLONG_MAX, - server.cluster_ping_interval, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("slowlog-log-slower-than", - NULL, - MODIFIABLE_CONFIG, - -1, - LLONG_MAX, - server.slowlog_log_slower_than, - 10000, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("latency-monitor-threshold", - NULL, - MODIFIABLE_CONFIG, - 0, - LLONG_MAX, - server.latency_monitor_threshold, - 0, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("proto-max-bulk-len", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - 1024 * 1024, - LONG_MAX, - server.proto_max_bulk_len, - 512ll * 1024 * 1024, - MEMORY_CONFIG, - NULL, - NULL), /* Bulk request max size */ - createLongLongConfig("stream-node-max-entries", - NULL, - MODIFIABLE_CONFIG, - 0, - LLONG_MAX, - server.stream_node_max_entries, - 100, - INTEGER_CONFIG, - NULL, - NULL), - createLongLongConfig("repl-backlog-size", - NULL, - MODIFIABLE_CONFIG, - 1, - LLONG_MAX, - server.repl_backlog_size, - 1024 * 1024, - MEMORY_CONFIG, - NULL, - updateReplBacklogSize), /* Default: 1mb */ + createLongLongConfig("busy-reply-threshold", "lua-time-limit", MODIFIABLE_CONFIG, 0, LONG_MAX, server.busy_reply_threshold, 5000, INTEGER_CONFIG, NULL, NULL), /* milliseconds */ + createLongLongConfig("cluster-node-timeout", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.cluster_node_timeout, 15000, INTEGER_CONFIG, NULL, NULL), + 
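The final argument of these entries is the apply callback invoked when the value is set (updateGoodReplicas and updateReplBacklogSize above are renamed examples). A minimal sketch of that callback shape, with a hypothetical name that is not part of this patch:

    static int updateExampleSetting(const char **err) {
        UNUSED(err);
        /* React to the new value already stored in the server struct.
         * Returning 1 accepts the change; presumably setting *err and
         * returning 0 would reject it. */
        return 1;
    }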
createLongLongConfig("cluster-ping-interval", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 0, LLONG_MAX, server.cluster_ping_interval, 0, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("slowlog-log-slower-than", NULL, MODIFIABLE_CONFIG, -1, LLONG_MAX, server.slowlog_log_slower_than, 10000, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("latency-monitor-threshold", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.latency_monitor_threshold, 0, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("proto-max-bulk-len", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 1024 * 1024, LONG_MAX, server.proto_max_bulk_len, 512ll * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */ + createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL), + createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 1024 * 1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */ /* Unsigned Long Long configs */ - createULongLongConfig("maxmemory", - NULL, - MODIFIABLE_CONFIG, - 0, - ULLONG_MAX, - server.maxmemory, - 0, - MEMORY_CONFIG, - NULL, - updateMaxmemory), - createULongLongConfig("cluster-link-sendbuf-limit", - NULL, - MODIFIABLE_CONFIG, - 0, - ULLONG_MAX, - server.cluster_link_msg_queue_limit_bytes, - 0, - MEMORY_CONFIG, - NULL, - NULL), + createULongLongConfig("maxmemory", NULL, MODIFIABLE_CONFIG, 0, ULLONG_MAX, server.maxmemory, 0, MEMORY_CONFIG, NULL, updateMaxmemory), + createULongLongConfig("cluster-link-sendbuf-limit", NULL, MODIFIABLE_CONFIG, 0, ULLONG_MAX, server.cluster_link_msg_queue_limit_bytes, 0, MEMORY_CONFIG, NULL, NULL), /* Size_t configs */ - createSizeTConfig("hash-max-listpack-entries", - "hash-max-ziplist-entries", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.hash_max_listpack_entries, - 512, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("set-max-intset-entries", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.set_max_intset_entries, - 512, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("set-max-listpack-entries", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.set_max_listpack_entries, - 128, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("set-max-listpack-value", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.set_max_listpack_value, - 64, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("zset-max-listpack-entries", - "zset-max-ziplist-entries", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.zset_max_listpack_entries, - 128, - INTEGER_CONFIG, - NULL, - NULL), - createSizeTConfig("active-defrag-ignore-bytes", - NULL, - MODIFIABLE_CONFIG, - 1, - LLONG_MAX, - server.active_defrag_ignore_bytes, - 100 << 20, - MEMORY_CONFIG, - NULL, - NULL), /* Default: don't defrag if frag overhead is below 100mb */ - createSizeTConfig("hash-max-listpack-value", - "hash-max-ziplist-value", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.hash_max_listpack_value, - 64, - MEMORY_CONFIG, - NULL, - NULL), - createSizeTConfig("stream-node-max-bytes", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.stream_node_max_bytes, - 4096, - MEMORY_CONFIG, - NULL, - NULL), - createSizeTConfig("zset-max-listpack-value", - "zset-max-ziplist-value", - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.zset_max_listpack_value, - 64, - MEMORY_CONFIG, - NULL, - NULL), - createSizeTConfig("hll-sparse-max-bytes", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.hll_sparse_max_bytes, - 3000, - MEMORY_CONFIG, - 
NULL, - NULL), - createSizeTConfig("tracking-table-max-keys", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.tracking_table_max_keys, - 1000000, - INTEGER_CONFIG, - NULL, - NULL), /* Default: 1 million keys max. */ - createSizeTConfig("client-query-buffer-limit", - NULL, - DEBUG_CONFIG | MODIFIABLE_CONFIG, - 1024 * 1024, - LONG_MAX, - server.client_max_querybuf_len, - 1024 * 1024 * 1024, - MEMORY_CONFIG, - NULL, - NULL), /* Default: 1GB max query buffer. */ - createSSizeTConfig("maxmemory-clients", - NULL, - MODIFIABLE_CONFIG, - -100, - SSIZE_MAX, - server.maxmemory_clients, - 0, - MEMORY_CONFIG | PERCENT_CONFIG, - NULL, - applyClientMaxMemoryUsage), + createSizeTConfig("hash-max-listpack-entries", "hash-max-ziplist-entries", MODIFIABLE_CONFIG, 0, LONG_MAX, server.hash_max_listpack_entries, 512, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("set-max-intset-entries", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.set_max_intset_entries, 512, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("set-max-listpack-entries", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.set_max_listpack_entries, 128, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("set-max-listpack-value", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.set_max_listpack_value, 64, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("zset-max-listpack-entries", "zset-max-ziplist-entries", MODIFIABLE_CONFIG, 0, LONG_MAX, server.zset_max_listpack_entries, 128, INTEGER_CONFIG, NULL, NULL), + createSizeTConfig("active-defrag-ignore-bytes", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.active_defrag_ignore_bytes, 100 << 20, MEMORY_CONFIG, NULL, NULL), /* Default: don't defrag if frag overhead is below 100mb */ + createSizeTConfig("hash-max-listpack-value", "hash-max-ziplist-value", MODIFIABLE_CONFIG, 0, LONG_MAX, server.hash_max_listpack_value, 64, MEMORY_CONFIG, NULL, NULL), + createSizeTConfig("stream-node-max-bytes", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.stream_node_max_bytes, 4096, MEMORY_CONFIG, NULL, NULL), + createSizeTConfig("zset-max-listpack-value", "zset-max-ziplist-value", MODIFIABLE_CONFIG, 0, LONG_MAX, server.zset_max_listpack_value, 64, MEMORY_CONFIG, NULL, NULL), + createSizeTConfig("hll-sparse-max-bytes", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.hll_sparse_max_bytes, 3000, MEMORY_CONFIG, NULL, NULL), + createSizeTConfig("tracking-table-max-keys", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.tracking_table_max_keys, 1000000, INTEGER_CONFIG, NULL, NULL), /* Default: 1 million keys max. */ + createSizeTConfig("client-query-buffer-limit", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 1024 * 1024, LONG_MAX, server.client_max_querybuf_len, 1024 * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), /* Default: 1GB max query buffer. 
*/ + createSSizeTConfig("maxmemory-clients", NULL, MODIFIABLE_CONFIG, -100, SSIZE_MAX, server.maxmemory_clients, 0, MEMORY_CONFIG | PERCENT_CONFIG, NULL, applyClientMaxMemoryUsage), /* Other configs */ - createTimeTConfig("repl-backlog-ttl", - NULL, - MODIFIABLE_CONFIG, - 0, - LONG_MAX, - server.repl_backlog_time_limit, - 60 * 60, - INTEGER_CONFIG, - NULL, - NULL), /* Default: 1 hour */ - createOffTConfig("auto-aof-rewrite-min-size", - NULL, - MODIFIABLE_CONFIG, - 0, - LLONG_MAX, - server.aof_rewrite_min_size, - 64 * 1024 * 1024, - MEMORY_CONFIG, - NULL, - NULL), - createOffTConfig("loading-process-events-interval-bytes", - NULL, - MODIFIABLE_CONFIG | HIDDEN_CONFIG, - 1024, - INT_MAX, - server.loading_process_events_interval_bytes, - 1024 * 1024 * 2, - INTEGER_CONFIG, - NULL, - NULL), - - createIntConfig("tls-port", - NULL, - MODIFIABLE_CONFIG, - 0, - 65535, - server.tls_port, - 0, - INTEGER_CONFIG, - NULL, - applyTLSPort), /* TCP port. */ - createIntConfig("tls-session-cache-size", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.tls_ctx_config.session_cache_size, - 20 * 1024, - INTEGER_CONFIG, - NULL, - applyTlsCfg), - createIntConfig("tls-session-cache-timeout", - NULL, - MODIFIABLE_CONFIG, - 0, - INT_MAX, - server.tls_ctx_config.session_cache_timeout, - 300, - INTEGER_CONFIG, - NULL, - applyTlsCfg), + createTimeTConfig("repl-backlog-ttl", NULL, MODIFIABLE_CONFIG, 0, LONG_MAX, server.repl_backlog_time_limit, 60 * 60, INTEGER_CONFIG, NULL, NULL), /* Default: 1 hour */ + createOffTConfig("auto-aof-rewrite-min-size", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.aof_rewrite_min_size, 64 * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), + createOffTConfig("loading-process-events-interval-bytes", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 1024, INT_MAX, server.loading_process_events_interval_bytes, 1024 * 1024 * 2, INTEGER_CONFIG, NULL, NULL), + + /* Tls configs */ + createIntConfig("tls-port", NULL, MODIFIABLE_CONFIG, 0, 65535, server.tls_port, 0, INTEGER_CONFIG, NULL, applyTLSPort), /* TCP port. 
*/ + createIntConfig("tls-session-cache-size", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_size, 20 * 1024, INTEGER_CONFIG, NULL, applyTlsCfg), + createIntConfig("tls-session-cache-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.tls_ctx_config.session_cache_timeout, 300, INTEGER_CONFIG, NULL, applyTlsCfg), createBoolConfig("tls-cluster", NULL, MODIFIABLE_CONFIG, server.tls_cluster, 0, NULL, applyTlsCfg), createBoolConfig("tls-replication", NULL, MODIFIABLE_CONFIG, server.tls_replication, 0, NULL, applyTlsCfg), - createEnumConfig("tls-auth-clients", - NULL, - MODIFIABLE_CONFIG, - tls_auth_clients_enum, - server.tls_auth_clients, - TLS_CLIENT_AUTH_YES, - NULL, - NULL), - createBoolConfig("tls-prefer-server-ciphers", - NULL, - MODIFIABLE_CONFIG, - server.tls_ctx_config.prefer_server_ciphers, - 0, - NULL, - applyTlsCfg), - createBoolConfig("tls-session-caching", - NULL, - MODIFIABLE_CONFIG, - server.tls_ctx_config.session_caching, - 1, - NULL, - applyTlsCfg), - createStringConfig("tls-cert-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.cert_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-key-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.key_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-key-file-pass", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.key_file_pass, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-client-cert-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.client_cert_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-client-key-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.client_key_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-client-key-file-pass", - NULL, - MODIFIABLE_CONFIG | SENSITIVE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.client_key_file_pass, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-dh-params-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.dh_params_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-ca-cert-file", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.ca_cert_file, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-ca-cert-dir", - NULL, - VOLATILE_CONFIG | MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.ca_cert_dir, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-protocols", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.protocols, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-ciphers", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.ciphers, - NULL, - NULL, - applyTlsCfg), - createStringConfig("tls-ciphersuites", - NULL, - MODIFIABLE_CONFIG, - EMPTY_STRING_IS_NULL, - server.tls_ctx_config.ciphersuites, - NULL, - NULL, - applyTlsCfg), + createEnumConfig("tls-auth-clients", NULL, MODIFIABLE_CONFIG, tls_auth_clients_enum, server.tls_auth_clients, TLS_CLIENT_AUTH_YES, NULL, NULL), + createBoolConfig("tls-prefer-server-ciphers", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.prefer_server_ciphers, 0, NULL, applyTlsCfg), + createBoolConfig("tls-session-caching", NULL, MODIFIABLE_CONFIG, server.tls_ctx_config.session_caching, 1, NULL, 
applyTlsCfg), + createStringConfig("tls-cert-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.cert_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-key-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-key-file-pass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.key_file_pass, NULL, NULL, applyTlsCfg), + createStringConfig("tls-client-cert-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_cert_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-client-key-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_key_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-client-key-file-pass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.client_key_file_pass, NULL, NULL, applyTlsCfg), + createStringConfig("tls-dh-params-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.dh_params_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-ca-cert-file", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ca_cert_file, NULL, NULL, applyTlsCfg), + createStringConfig("tls-ca-cert-dir", NULL, VOLATILE_CONFIG | MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ca_cert_dir, NULL, NULL, applyTlsCfg), + createStringConfig("tls-protocols", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.protocols, NULL, NULL, applyTlsCfg), + createStringConfig("tls-ciphers", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ciphers, NULL, NULL, applyTlsCfg), + createStringConfig("tls-ciphersuites", NULL, MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.tls_ctx_config.ciphersuites, NULL, NULL, applyTlsCfg), /* Special configs */ - createSpecialConfig("dir", - NULL, - MODIFIABLE_CONFIG | PROTECTED_CONFIG | DENY_LOADING_CONFIG, - setConfigDirOption, - getConfigDirOption, - rewriteConfigDirOption, - NULL), - createSpecialConfig("save", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigSaveOption, - getConfigSaveOption, - rewriteConfigSaveOption, - NULL), - createSpecialConfig("client-output-buffer-limit", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigClientOutputBufferLimitOption, - getConfigClientOutputBufferLimitOption, - rewriteConfigClientOutputBufferLimitOption, - NULL), - createSpecialConfig("oom-score-adj-values", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigOOMScoreAdjValuesOption, - getConfigOOMScoreAdjValuesOption, - rewriteConfigOOMScoreAdjValuesOption, - updateOOMScoreAdj), - createSpecialConfig("notify-keyspace-events", - NULL, - MODIFIABLE_CONFIG, - setConfigNotifyKeyspaceEventsOption, - getConfigNotifyKeyspaceEventsOption, - rewriteConfigNotifyKeyspaceEventsOption, - NULL), - createSpecialConfig("bind", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigBindOption, - getConfigBindOption, - rewriteConfigBindOption, - applyBind), - createSpecialConfig("replicaof", - "slaveof", - IMMUTABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigReplicaOfOption, - getConfigReplicaOfOption, - rewriteConfigReplicaOfOption, - NULL), - createSpecialConfig("latency-tracking-info-percentiles", - NULL, - MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, - setConfigLatencyTrackingInfoPercentilesOutputOption, - 
getConfigLatencyTrackingInfoPercentilesOutputOption, - rewriteConfigLatencyTrackingInfoPercentilesOutputOption, - NULL), + createSpecialConfig("dir", NULL, MODIFIABLE_CONFIG | PROTECTED_CONFIG | DENY_LOADING_CONFIG, setConfigDirOption, getConfigDirOption, rewriteConfigDirOption, NULL), + createSpecialConfig("save", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigSaveOption, getConfigSaveOption, rewriteConfigSaveOption, NULL), + createSpecialConfig("client-output-buffer-limit", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigClientOutputBufferLimitOption, getConfigClientOutputBufferLimitOption, rewriteConfigClientOutputBufferLimitOption, NULL), + createSpecialConfig("oom-score-adj-values", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigOOMScoreAdjValuesOption, getConfigOOMScoreAdjValuesOption, rewriteConfigOOMScoreAdjValuesOption, updateOOMScoreAdj), + createSpecialConfig("notify-keyspace-events", NULL, MODIFIABLE_CONFIG, setConfigNotifyKeyspaceEventsOption, getConfigNotifyKeyspaceEventsOption, rewriteConfigNotifyKeyspaceEventsOption, NULL), + createSpecialConfig("bind", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigBindOption, getConfigBindOption, rewriteConfigBindOption, applyBind), + createSpecialConfig("replicaof", "slaveof", IMMUTABLE_CONFIG | MULTI_ARG_CONFIG, setConfigReplicaOfOption, getConfigReplicaOfOption, rewriteConfigReplicaOfOption, NULL), + createSpecialConfig("latency-tracking-info-percentiles", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigLatencyTrackingInfoPercentilesOutputOption, getConfigLatencyTrackingInfoPercentilesOutputOption, rewriteConfigLatencyTrackingInfoPercentilesOutputOption, NULL), /* NULL Terminator, this is dropped when we convert to the runtime array. */ - {NULL}}; + {NULL} + /* clang-format on */ +}; /* Create a new config by copying the passed in config. Returns 1 on success * or 0 when their was already a config with the same name.. */ diff --git a/src/db.c b/src/db.c index fa07deeb4b..1843395d8c 100644 --- a/src/db.c +++ b/src/db.c @@ -29,7 +29,6 @@ #include "server.h" #include "cluster.h" -#include "atomicvar.h" #include "latency.h" #include "script.h" #include "functions.h" @@ -89,8 +88,8 @@ void updateLFU(robj *val) { * * Note: this function also returns NULL if the key is logically expired but * still existing, in case this is a replica and the LOOKUP_WRITE is not set. - * Even if the key expiry is master-driven, we can correctly report a key is - * expired on replicas even if the master is lagging expiring our key via DELs + * Even if the key expiry is primary-driven, we can correctly report a key is + * expired on replicas even if the primary is lagging expiring our key via DELs * in the replication link. */ robj *lookupKey(serverDb *db, robj *key, int flags) { dictEntry *de = dbFind(db, key->ptr); @@ -98,14 +97,14 @@ robj *lookupKey(serverDb *db, robj *key, int flags) { if (de) { val = dictGetVal(de); /* Forcing deletion of expired keys on a replica makes the replica - * inconsistent with the master. We forbid it on readonly replicas, but + * inconsistent with the primary. We forbid it on readonly replicas, but * we have to allow it on writable replicas to make write commands * behave consistently. * * It's possible that the WRITE flag is set even during a readonly * command, since the command may trigger events that cause modules to * perform additional writes. 
*/ - int is_ro_replica = server.masterhost && server.repl_slave_ro; + int is_ro_replica = server.primary_host && server.repl_replica_ro; int expire_flags = 0; if (flags & LOOKUP_WRITE && !is_ro_replica) expire_flags |= EXPIRE_FORCE_DELETE_EXPIRED; if (flags & LOOKUP_NOEXPIRE) expire_flags |= EXPIRE_AVOID_DELETE_EXPIRED; @@ -123,6 +122,10 @@ robj *lookupKey(serverDb *db, robj *key, int flags) { server.current_client->cmd->proc != touchCommand) flags |= LOOKUP_NOTOUCH; if (!hasActiveChildProcess() && !(flags & LOOKUP_NOTOUCH)) { + if (!canUseSharedObject() && val->refcount == OBJ_SHARED_REFCOUNT) { + val = dupStringObject(val); + kvstoreDictSetVal(db->keys, getKeySlot(key->ptr), de, val); + } if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { updateLFU(val); } else { @@ -358,10 +361,10 @@ robj *dbRandomKey(serverDb *db) { key = dictGetKey(de); keyobj = createStringObject(key, sdslen(key)); if (dbFindExpires(db, key)) { - if (allvolatile && server.masterhost && --maxtries == 0) { + if (allvolatile && server.primary_host && --maxtries == 0) { /* If the DB is composed only of keys with an expire set, * it could happen that all the keys are already logically - * expired in the slave, so the function cannot stop because + * expired in the replica, so the function cannot stop because * expireIfNeeded() is false, nor it can stop because * dictGetFairRandomKey() returns NULL (there are keys to return). * To prevent the infinite loop we do some tries, but if there @@ -537,7 +540,7 @@ long long emptyData(int dbnum, int flags, void(callback)(dict *)) { /* Empty the database structure. */ removed = emptyDbStructure(server.db, dbnum, async, callback); - if (dbnum == -1) flushSlaveKeysWithExpireList(); + if (dbnum == -1) flushReplicaKeysWithExpireList(); if (with_functions) { serverAssert(dbnum == -1); @@ -670,7 +673,7 @@ void flushAllDataAndResetRDB(int flags) { if (server.saveparamslen > 0) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - rdbSave(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE); + rdbSave(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE); } #if defined(USE_JEMALLOC) @@ -1607,7 +1610,7 @@ void swapMainDbWithTempDb(serverDb *tempDb) { } trackingInvalidateKeysOnFlush(1); - flushSlaveKeysWithExpireList(); + flushReplicaKeysWithExpireList(); } /* SWAPDB db1 db2 */ @@ -1663,8 +1666,8 @@ void setExpire(client *c, serverDb *db, robj *key, long long when) { dictSetSignedIntegerVal(de, when); } - int writable_slave = server.masterhost && server.repl_slave_ro == 0; - if (c && writable_slave && !(c->flags & CLIENT_MASTER)) rememberSlaveKeyWithExpire(db, key); + int writable_replica = server.primary_host && server.repl_replica_ro == 0; + if (c && writable_replica && !(c->flags & CLIENT_PRIMARY)) rememberReplicaKeyWithExpire(db, key); } /* Return the expire time of the specified key, or -1 if no expire @@ -1691,7 +1694,7 @@ void deleteExpiredKeyAndPropagate(serverDb *db, robj *keyobj) { } /* Propagate an implicit key deletion into replicas and the AOF file. - * When a key was deleted in the master by eviction, expiration or a similar + * When a key was deleted in the primary by eviction, expiration or a similar * mechanism a DEL/UNLINK operation for this key is sent * to all the replicas and the AOF file if enabled. * @@ -1717,7 +1720,7 @@ void propagateDeletion(serverDb *db, robj *key, int lazy) { incrRefCount(argv[0]); incrRefCount(argv[1]); - /* If the master decided to delete a key we must propagate it to replicas no matter what.
+ /* If the primary decided to delete a key we must propagate it to replicas no matter what. * Even if module executed a command without asking for propagation. */ int prev_replication_allowed = server.replication_allowed; server.replication_allowed = 1; @@ -1752,13 +1755,13 @@ int keyIsExpired(serverDb *db, robj *key) { * * The behavior of the function depends on the replication role of the * instance, because by default replicas do not delete expired keys. They - * wait for DELs from the master for consistency matters. However even + * wait for DELs from the primary for consistency matters. However even * replicas will try to have a coherent return value for the function, * so that read commands executed in the replica side will be able to * behave like if the key is expired even if still present (because the - * master has yet to propagate the DEL). + * primary has yet to propagate the DEL). * - * In masters as a side effect of finding a key which is expired, such + * In primaries as a side effect of finding a key which is expired, such * key will be evicted from the database. Also this may trigger the * propagation of a DEL/UNLINK command in AOF / replication stream. * @@ -1766,7 +1769,7 @@ int keyIsExpired(serverDb *db, robj *key) { * it still returns KEY_EXPIRED if the key is logically expired. To force deletion * of logically expired keys even on replicas, use the EXPIRE_FORCE_DELETE_EXPIRED * flag. Note though that if the current client is executing - * replicated commands from the master, keys are never considered expired. + * replicated commands from the primary, keys are never considered expired. * * On the other hand, if you just want expiration check, but need to avoid * the actual key deletion and propagation of the deletion, use the @@ -1781,7 +1784,7 @@ keyStatus expireIfNeeded(serverDb *db, robj *key, int flags) { /* If we are running in the context of a replica, instead of * evicting the expired key from the database, we return ASAP: - * the replica key expiration is controlled by the master that will + * the replica key expiration is controlled by the primary that will * send us synthesized DEL operations for expired keys. The * exception is when write operations are performed on writable * replicas. @@ -1790,15 +1793,15 @@ keyStatus expireIfNeeded(serverDb *db, robj *key, int flags) { * that is, KEY_VALID if we think the key should still be valid, * KEY_EXPIRED if we think the key is expired but don't want to delete it at this time. * - * When replicating commands from the master, keys are never considered + * When replicating commands from the primary, keys are never considered * expired. */ - if (server.masterhost != NULL) { - if (server.current_client && (server.current_client->flags & CLIENT_MASTER)) return KEY_VALID; + if (server.primary_host != NULL) { + if (server.current_client && (server.current_client->flags & CLIENT_PRIMARY)) return KEY_VALID; if (!(flags & EXPIRE_FORCE_DELETE_EXPIRED)) return KEY_EXPIRED; } /* In some cases we're explicitly instructed to return an indication of a - * missing key without actually deleting it, even on masters. */ + * missing key without actually deleting it, even on primaries. */ if (flags & EXPIRE_AVOID_DELETE_EXPIRED) return KEY_EXPIRED; /* If 'expire' action is paused, for whatever reason, then don't expire any key.
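The new branch in the lookupKey() hunk above duplicates a shared value object before the LRU/LFU bookkeeping runs. Below is a minimal standalone C sketch of why that unsharing step matters, under stated assumptions: the toyObj struct and the toy* helpers are invented for illustration and are not the server's real robj API, and the semantics of canUseSharedObject() are not modeled here.

/* Toy model of why an object shared by many keys cannot carry per-key
 * LRU metadata: updating the access clock through one key would silently
 * change it for every other key pointing at the same object, so the
 * object is duplicated (unshared) before the access is recorded.
 * Names are illustrative only. */
#include <limits.h>
#include <stdio.h>

#define TOY_SHARED_REFCOUNT INT_MAX /* sentinel meaning "shared, never freed" */

typedef struct toyObj {
    int refcount;
    unsigned lru; /* last-access clock; meaningless while the object is shared */
    long value;
} toyObj;

static toyObj shared_zero = {TOY_SHARED_REFCOUNT, 0, 0};

/* Give the caller a private copy it is free to annotate. */
static toyObj *toyDup(const toyObj *o) {
    static toyObj copy; /* static storage keeps the demo allocation-free */
    copy = *o;
    copy.refcount = 1;
    return &copy;
}

/* Mimics the lookup path: unshare first, then record the access. */
static toyObj *toyTouch(toyObj *val, unsigned now) {
    if (val->refcount == TOY_SHARED_REFCOUNT) val = toyDup(val);
    val->lru = now;
    return val;
}

int main(void) {
    toyObj *key_a = &shared_zero; /* two keys referencing the shared object */
    toyObj *key_b = &shared_zero;

    key_a = toyTouch(key_a, 100); /* key A gets its own copy with lru = 100 */

    /* key B (and the shared object itself) keep their original clock. */
    printf("A lru=%u, B lru=%u, shared lru=%u\n", key_a->lru, key_b->lru, shared_zero.lru);
    return 0;
}

Running the sketch prints "A lru=100, B lru=0, shared lru=0": only the key that was actually touched carries updated access metadata, which is the effect the duplication in lookupKey() is after.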
diff --git a/src/debug.c b/src/debug.c index 51e9c6e9f6..6394e3f0f4 100644 --- a/src/debug.c +++ b/src/debug.c @@ -552,7 +552,7 @@ void debugCommand(client *c) { if (save) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - if (rdbSave(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) != C_OK) { + if (rdbSave(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) != C_OK) { addReplyErrorObject(c, shared.err); return; } @@ -845,7 +845,7 @@ void debugCommand(client *c) { server.aof_flush_sleep = atoi(c->argv[2]->ptr); addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "replicate") && c->argc >= 3) { - replicationFeedSlaves(-1, c->argv + 2, c->argc - 2); + replicationFeedReplicas(-1, c->argv + 2, c->argc - 2); addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "error") && c->argc == 3) { sds errstr = sdsnewlen("-", 1); @@ -925,7 +925,7 @@ void debugCommand(client *c) { addReply(c, shared.ok); } else if (!strcasecmp(c->argv[1]->ptr, "stringmatch-test") && c->argc == 2) { stringmatchlen_fuzz_test(); - addReplyStatus(c, "Apparently Redis did not crash: test passed"); + addReplyStatus(c, "Apparently the server did not crash: test passed"); } else if (!strcasecmp(c->argv[1]->ptr, "set-disable-deny-scripts") && c->argc == 3) { server.script_disable_deny_script = atoi(c->argv[2]->ptr); addReply(c, shared.ok); diff --git a/src/dict.c b/src/dict.c index 119c60ab57..bc92d49564 100644 --- a/src/dict.c +++ b/src/dict.c @@ -48,6 +48,8 @@ #include "serverassert.h" #include "monotonic.h" +#define UNUSED(V) ((void)V) + /* Using dictSetResizeEnabled() we make possible to disable * resizing and rehashing of the hash table as needed. This is very important * for us, as we use copy-on-write and don't want to move too much memory @@ -800,8 +802,9 @@ void dictSetKey(dict *d, dictEntry *de, void *key) { } void dictSetVal(dict *d, dictEntry *de, void *val) { + UNUSED(d); assert(entryHasValue(de)); - de->v.val = d->type->valDup ? d->type->valDup(d, val) : val; + de->v.val = val; } void dictSetSignedIntegerVal(dictEntry *de, int64_t val) { @@ -940,6 +943,8 @@ unsigned long long dictFingerprint(dict *d) { return hash; } +/* Initiaize a normal iterator. This function should be called when initializing + * an iterator on the stack. */ void dictInitIterator(dictIterator *iter, dict *d) { iter->d = d; iter->table = 0; @@ -949,6 +954,8 @@ void dictInitIterator(dictIterator *iter, dict *d) { iter->nextEntry = NULL; } +/* Initialize a safe iterator, which is allowed to modify the dictionary while iterating. + * You must call dictResetIterator when you are done with a safe iterator. 
*/ void dictInitSafeIterator(dictIterator *iter, dict *d) { dictInitIterator(iter, d); iter->safe = 1; @@ -956,9 +963,10 @@ void dictInitSafeIterator(dictIterator *iter, dict *d) { void dictResetIterator(dictIterator *iter) { if (!(iter->index == -1 && iter->table == 0)) { - if (iter->safe) + if (iter->safe) { dictResumeRehashing(iter->d); - else + assert(iter->d->pauserehash >= 0); + } else assert(iter->fingerprint == dictFingerprint(iter->d)); } } @@ -1745,7 +1753,7 @@ char *stringFromLongLong(long long value) { return s; } -dictType BenchmarkDictType = {hashCallback, NULL, NULL, compareCallback, freeCallback, NULL, NULL}; +dictType BenchmarkDictType = {hashCallback, NULL, compareCallback, freeCallback, NULL, NULL}; #define start_benchmark() start = timeInMilliseconds() #define end_benchmark(msg) \ diff --git a/src/dict.h b/src/dict.h index 7ba22edf1e..723e5a54c2 100644 --- a/src/dict.h +++ b/src/dict.h @@ -54,7 +54,6 @@ typedef struct dictType { /* Callbacks */ uint64_t (*hashFunction)(const void *key); void *(*keyDup)(dict *d, const void *key); - void *(*valDup)(dict *d, const void *obj); int (*keyCompare)(dict *d, const void *key1, const void *key2); void (*keyDestructor)(dict *d, void *key); void (*valDestructor)(dict *d, void *obj); diff --git a/src/eval.c b/src/eval.c index d9c2c183d6..8c6db3f18b 100644 --- a/src/eval.c +++ b/src/eval.c @@ -71,7 +71,6 @@ static uint64_t dictStrCaseHash(const void *key) { dictType shaScriptObjectDictType = { dictStrCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictLuaScriptDestructor, /* val destructor */ @@ -100,7 +99,7 @@ struct ldbState { int bp[LDB_BREAKPOINTS_MAX]; /* An array of breakpoints line numbers. */ int bpcount; /* Number of valid entries inside bp. */ int step; /* Stop at next line regardless of breakpoints. */ - int luabp; /* Stop at next line because redis.breakpoint() was called. */ + int luabp; /* Stop at next line because server.breakpoint() was called. */ sds *src; /* Lua script source code split by line. */ int lines; /* Number of lines in 'src'. */ int currentline; /* Current line number. */ @@ -115,7 +114,7 @@ struct ldbState { /* Perform the SHA1 of the input string. We use this both for hashing script * bodies in order to obtain the Lua function name, and in the implementation - * of redis.sha1(). + * of server.sha1(). * * 'digest' should point to a 41 bytes buffer: 40 for SHA1 converted into an * hexadecimal number, plus 1 byte for null term. */ @@ -136,12 +135,12 @@ void sha1hex(char *digest, char *script, size_t len) { digest[40] = '\0'; } -/* redis.breakpoint() +/* server.breakpoint() * * Allows to stop execution during a debugging session from within * the Lua code implementation, like if a breakpoint was set in the code * immediately after the function. */ -int luaRedisBreakpointCommand(lua_State *lua) { +int luaServerBreakpointCommand(lua_State *lua) { if (ldb.active) { ldb.luabp = 1; lua_pushboolean(lua, 1); @@ -151,12 +150,12 @@ int luaRedisBreakpointCommand(lua_State *lua) { return 1; } -/* redis.debug() +/* server.debug() * * Log a string message into the output console. * Can take multiple arguments that will be separated by commas. * Nothing is returned to the caller. 
*/ -int luaRedisDebugCommand(lua_State *lua) { +int luaServerDebugCommand(lua_State *lua) { if (!ldb.active) return 0; int argc = lua_gettop(lua); sds log = sdscatprintf(sdsempty(), " line %d: ", ldb.currentline); @@ -168,14 +167,14 @@ int luaRedisDebugCommand(lua_State *lua) { return 0; } -/* redis.replicate_commands() +/* server.replicate_commands() * * DEPRECATED: Now do nothing and always return true. * Turn on single commands replication if the script never called * a write command so far, and returns true. Otherwise if the script * already started to write, returns false and stick to whole scripts * replication, which is our default. */ -int luaRedisReplicateCommandsCommand(lua_State *lua) { +int luaServerReplicateCommandsCommand(lua_State *lua) { lua_pushboolean(lua, 1); return 1; } @@ -206,27 +205,27 @@ void scriptingInit(int setup) { lctx.lua_scripts_lru_list = listCreate(); lctx.lua_scripts_mem = 0; - luaRegisterRedisAPI(lua); + luaRegisterServerAPI(lua); /* register debug commands */ - lua_getglobal(lua, "redis"); + lua_getglobal(lua, "server"); - /* redis.breakpoint */ + /* server.breakpoint */ lua_pushstring(lua, "breakpoint"); - lua_pushcfunction(lua, luaRedisBreakpointCommand); + lua_pushcfunction(lua, luaServerBreakpointCommand); lua_settable(lua, -3); - /* redis.debug */ + /* server.debug */ lua_pushstring(lua, "debug"); - lua_pushcfunction(lua, luaRedisDebugCommand); + lua_pushcfunction(lua, luaServerDebugCommand); lua_settable(lua, -3); - /* redis.replicate_commands */ + /* server.replicate_commands */ lua_pushstring(lua, "replicate_commands"); - lua_pushcfunction(lua, luaRedisReplicateCommandsCommand); + lua_pushcfunction(lua, luaServerReplicateCommandsCommand); lua_settable(lua, -3); - lua_setglobal(lua, "redis"); + lua_setglobal(lua, "server"); /* Add a helper function we use for pcall error reporting. * Note that when the error is in the C function we want to report the @@ -1205,50 +1204,48 @@ void ldbLogStackValue(lua_State *lua, char *prefix) { ldbLogWithMaxLen(s); } -char *ldbRedisProtocolToHuman_Int(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Bulk(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Status(sds *o, char *reply); -char *ldbRedisProtocolToHuman_MultiBulk(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Set(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Map(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Null(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Bool(sds *o, char *reply); -char *ldbRedisProtocolToHuman_Double(sds *o, char *reply); +char *ldbRespToHuman_Int(sds *o, char *reply); +char *ldbRespToHuman_Bulk(sds *o, char *reply); +char *ldbRespToHuman_Status(sds *o, char *reply); +char *ldbRespToHuman_MultiBulk(sds *o, char *reply); +char *ldbRespToHuman_Set(sds *o, char *reply); +char *ldbRespToHuman_Map(sds *o, char *reply); +char *ldbRespToHuman_Null(sds *o, char *reply); +char *ldbRespToHuman_Bool(sds *o, char *reply); +char *ldbRespToHuman_Double(sds *o, char *reply); /* Get RESP from 'reply' and appends it in human readable form to * the passed SDS string 'o'. * * Note that the SDS string is passed by reference (pointer of pointer to * char*) so that we can return a modified pointer, as for SDS semantics. 
*/ -char *ldbRedisProtocolToHuman(sds *o, char *reply) { +char *ldbRespToHuman(sds *o, char *reply) { char *p = reply; - /* clang-format off */ - switch(*p) { - case ':': p = ldbRedisProtocolToHuman_Int(o,reply); break; - case '$': p = ldbRedisProtocolToHuman_Bulk(o,reply); break; - case '+': p = ldbRedisProtocolToHuman_Status(o,reply); break; - case '-': p = ldbRedisProtocolToHuman_Status(o,reply); break; - case '*': p = ldbRedisProtocolToHuman_MultiBulk(o,reply); break; - case '~': p = ldbRedisProtocolToHuman_Set(o,reply); break; - case '%': p = ldbRedisProtocolToHuman_Map(o,reply); break; - case '_': p = ldbRedisProtocolToHuman_Null(o,reply); break; - case '#': p = ldbRedisProtocolToHuman_Bool(o,reply); break; - case ',': p = ldbRedisProtocolToHuman_Double(o,reply); break; + switch (*p) { + case ':': p = ldbRespToHuman_Int(o, reply); break; + case '$': p = ldbRespToHuman_Bulk(o, reply); break; + case '+': p = ldbRespToHuman_Status(o, reply); break; + case '-': p = ldbRespToHuman_Status(o, reply); break; + case '*': p = ldbRespToHuman_MultiBulk(o, reply); break; + case '~': p = ldbRespToHuman_Set(o, reply); break; + case '%': p = ldbRespToHuman_Map(o, reply); break; + case '_': p = ldbRespToHuman_Null(o, reply); break; + case '#': p = ldbRespToHuman_Bool(o, reply); break; + case ',': p = ldbRespToHuman_Double(o, reply); break; } - /* clang-format on */ return p; } -/* The following functions are helpers for ldbRedisProtocolToHuman(), each +/* The following functions are helpers for ldbRespToHuman(), each * take care of a given RESP return type. */ -char *ldbRedisProtocolToHuman_Int(sds *o, char *reply) { +char *ldbRespToHuman_Int(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); *o = sdscatlen(*o, reply + 1, p - reply - 1); return p + 2; } -char *ldbRedisProtocolToHuman_Bulk(sds *o, char *reply) { +char *ldbRespToHuman_Bulk(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); long long bulklen; @@ -1262,14 +1259,14 @@ char *ldbRedisProtocolToHuman_Bulk(sds *o, char *reply) { } } -char *ldbRedisProtocolToHuman_Status(sds *o, char *reply) { +char *ldbRespToHuman_Status(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); *o = sdscatrepr(*o, reply, p - reply); return p + 2; } -char *ldbRedisProtocolToHuman_MultiBulk(sds *o, char *reply) { +char *ldbRespToHuman_MultiBulk(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); long long mbulklen; int j = 0; @@ -1282,14 +1279,14 @@ char *ldbRedisProtocolToHuman_MultiBulk(sds *o, char *reply) { } *o = sdscatlen(*o, "[", 1); for (j = 0; j < mbulklen; j++) { - p = ldbRedisProtocolToHuman(o, p); + p = ldbRespToHuman(o, p); if (j != mbulklen - 1) *o = sdscatlen(*o, ",", 1); } *o = sdscatlen(*o, "]", 1); return p; } -char *ldbRedisProtocolToHuman_Set(sds *o, char *reply) { +char *ldbRespToHuman_Set(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); long long mbulklen; int j = 0; @@ -1298,14 +1295,14 @@ char *ldbRedisProtocolToHuman_Set(sds *o, char *reply) { p += 2; *o = sdscatlen(*o, "~(", 2); for (j = 0; j < mbulklen; j++) { - p = ldbRedisProtocolToHuman(o, p); + p = ldbRespToHuman(o, p); if (j != mbulklen - 1) *o = sdscatlen(*o, ",", 1); } *o = sdscatlen(*o, ")", 1); return p; } -char *ldbRedisProtocolToHuman_Map(sds *o, char *reply) { +char *ldbRespToHuman_Map(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); long long mbulklen; int j = 0; @@ -1314,22 +1311,22 @@ char *ldbRedisProtocolToHuman_Map(sds *o, char *reply) { p += 2; *o = sdscatlen(*o, "{", 1); for (j = 0; j < mbulklen; j++) { - p = 
ldbRedisProtocolToHuman(o, p); + p = ldbRespToHuman(o, p); *o = sdscatlen(*o, " => ", 4); - p = ldbRedisProtocolToHuman(o, p); + p = ldbRespToHuman(o, p); if (j != mbulklen - 1) *o = sdscatlen(*o, ",", 1); } *o = sdscatlen(*o, "}", 1); return p; } -char *ldbRedisProtocolToHuman_Null(sds *o, char *reply) { +char *ldbRespToHuman_Null(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); *o = sdscatlen(*o, "(null)", 6); return p + 2; } -char *ldbRedisProtocolToHuman_Bool(sds *o, char *reply) { +char *ldbRespToHuman_Bool(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); if (reply[1] == 't') *o = sdscatlen(*o, "#true", 5); @@ -1338,7 +1335,7 @@ char *ldbRedisProtocolToHuman_Bool(sds *o, char *reply) { return p + 2; } -char *ldbRedisProtocolToHuman_Double(sds *o, char *reply) { +char *ldbRespToHuman_Double(sds *o, char *reply) { char *p = strchr(reply + 1, '\r'); *o = sdscatlen(*o, "(double) ", 9); *o = sdscatlen(*o, reply + 1, p - reply - 1); @@ -1348,9 +1345,9 @@ char *ldbRedisProtocolToHuman_Double(sds *o, char *reply) { /* Log a RESP reply as debugger output, in a human readable format. * If the resulting string is longer than 'len' plus a few more chars * used as prefix, it gets truncated. */ -void ldbLogRedisReply(char *reply) { +void ldbLogRespReply(char *reply) { sds log = sdsnew(" "); - ldbRedisProtocolToHuman(&log, reply); + ldbRespToHuman(&log, reply); ldbLogWithMaxLen(log); } @@ -1488,29 +1485,29 @@ void ldbEval(lua_State *lua, sds *argv, int argc) { } /* Implement the debugger "server" command. We use a trick in order to make - * the implementation very simple: we just call the Lua redis.call() command + * the implementation very simple: we just call the Lua server.call() command * implementation, with ldb.step enabled, so as a side effect the command * and its reply are logged. */ -void ldbRedis(lua_State *lua, sds *argv, int argc) { +void ldbServer(lua_State *lua, sds *argv, int argc) { int j; if (!lua_checkstack(lua, argc + 1)) { /* Increase the Lua stack if needed to make sure there is enough room * to push 'argc + 1' elements to the stack. On failure, return error. * Notice that we need, in worst case, 'argc + 1' elements because we push all the arguments - * given by the user (without the first argument) and we also push the 'redis' global table and - * 'redis.call' function so: - * (1 (redis table)) + (1 (redis.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/ - ldbLogRedisReply("max lua stack reached"); + * given by the user (without the first argument) and we also push the 'server' global table and + * 'server.call' function so: + * (1 (server table)) + (1 (server.call function)) + (argc - 1 (all arguments without the first)) = argc + 1*/ + ldbLogRespReply("max lua stack reached"); return; } - lua_getglobal(lua, "redis"); + lua_getglobal(lua, "server"); lua_pushstring(lua, "call"); - lua_gettable(lua, -2); /* Stack: redis, redis.call */ + lua_gettable(lua, -2); /* Stack: server, server.call */ for (j = 1; j < argc; j++) lua_pushlstring(lua, argv[j], sdslen(argv[j])); - ldb.step = 1; /* Force redis.call() to log. */ - lua_pcall(lua, argc - 1, 1, 0); /* Stack: redis, result */ + ldb.step = 1; /* Force server.call() to log. */ + lua_pcall(lua, argc - 1, 1, 0); /* Stack: server, result */ ldb.step = 0; /* Disable logging. */ lua_pop(lua, 2); /* Discard the result and clean the stack. */ } @@ -1593,7 +1590,7 @@ int ldbRepl(lua_State *lua) { /* Execute the command. 
*/ if (!strcasecmp(argv[0], "h") || !strcasecmp(argv[0], "help")) { - ldbLog(sdsnew("Redis Lua debugger help:")); + ldbLog(sdsnew("Lua debugger help:")); ldbLog(sdsnew("[h]elp Show this help.")); ldbLog(sdsnew("[s]tep Run current line and stop again.")); ldbLog(sdsnew("[n]ext Alias for step.")); @@ -1613,15 +1610,15 @@ int ldbRepl(lua_State *lua) { ldbLog(sdsnew("[b]reak 0 Remove all breakpoints.")); ldbLog(sdsnew("[t]race Show a backtrace.")); ldbLog(sdsnew("[e]val Execute some Lua code (in a different callframe).")); - ldbLog(sdsnew("[r]edis Execute a Redis command.")); - ldbLog(sdsnew("[m]axlen [len] Trim logged Redis replies and Lua var dumps to len.")); + ldbLog(sdsnew("[v]alkey Execute a command.")); + ldbLog(sdsnew("[m]axlen [len] Trim logged replies and Lua var dumps to len.")); ldbLog(sdsnew(" Specifying zero as <len> means unlimited.")); ldbLog(sdsnew("[a]bort Stop the execution of the script. In sync")); ldbLog(sdsnew(" mode dataset changes will be retained.")); ldbLog(sdsnew("")); ldbLog(sdsnew("Debugger functions you can call from Lua scripts:")); - ldbLog(sdsnew("redis.debug() Produce logs in the debugger console.")); - ldbLog(sdsnew("redis.breakpoint() Stop execution like if there was a breakpoint in the")); + ldbLog(sdsnew("server.debug() Produce logs in the debugger console.")); + ldbLog(sdsnew("server.breakpoint() Stop execution like if there was a breakpoint in the")); ldbLog(sdsnew(" next line of code.")); ldbSendLogs(); } else if (!strcasecmp(argv[0], "s") || !strcasecmp(argv[0], "step") || !strcasecmp(argv[0], "n") || @@ -1645,9 +1642,13 @@ int ldbRepl(lua_State *lua) { } else if (!strcasecmp(argv[0], "a") || !strcasecmp(argv[0], "abort")) { luaPushError(lua, "script aborted for user request"); luaError(lua); - } else if (argc > 1 && (!strcasecmp(argv[0], "r") || !strcasecmp(argv[0], REDIS_API_NAME) || + } else if (argc > 1 && ((!strcasecmp(argv[0], "r") || !strcasecmp(argv[0], "redis")) || + (!strcasecmp(argv[0], "v") || !strcasecmp(argv[0], "valkey")) || !strcasecmp(argv[0], SERVER_API_NAME))) { - ldbRedis(lua, argv, argc); + /* [r]edis or [v]alkey calls a command. We accept "server" too, but + * not "s" because that's "step". Neither can we use [c]all because + * "c" is continue. */ + ldbServer(lua, argv, argc); ldbSendLogs(); } else if ((!strcasecmp(argv[0], "p") || !strcasecmp(argv[0], "print"))) { if (argc == 2) @@ -1668,7 +1669,7 @@ int ldbRepl(lua_State *lua) { ldbList(1, 1000000); ldbSendLogs(); } else { - ldbLog(sdsnew(" Unknown Redis Lua debugger command or " + ldbLog(sdsnew(" Unknown Lua debugger command or " "wrong number of arguments.")); ldbSendLogs(); } @@ -1712,7 +1713,7 @@ void luaLdbLineHook(lua_State *lua, lua_Debug *ar) { if (ldb.step || bp) { char *reason = "step over"; if (bp) - reason = ldb.luabp ? 
"server.breakpoint() called" : "break point"; else if (timeout) reason = "timeout reached, infinite loop?"; ldb.step = 0; diff --git a/src/evict.c b/src/evict.c index 4a51974ac6..fb04616871 100644 --- a/src/evict.c +++ b/src/evict.c @@ -32,7 +32,6 @@ #include "server.h" #include "bio.h" -#include "atomicvar.h" #include "script.h" #include @@ -322,7 +321,7 @@ unsigned long LFUDecrAndReturn(robj *o) { return counter; } -/* We don't want to count AOF buffers and slaves output buffers as +/* We don't want to count AOF buffers and replicas output buffers as * used memory: the eviction should use mostly data size, because * it can cause feedback-loop when we push DELs into them, putting * more and more DELs will make them bigger, if we count them, we @@ -378,7 +377,7 @@ size_t freeMemoryGetNotCountedMemory(void) { * 'total' total amount of bytes used. * (Populated both for C_ERR and C_OK) * - * 'logical' the amount of memory used minus the slaves/AOF buffers. + * 'logical' the amount of memory used minus the replicas/AOF buffers. * (Populated when C_ERR is returned) * * 'tofree' the amount of memory that should be released @@ -394,7 +393,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev size_t mem_reported, mem_used, mem_tofree; /* Check if we are over the memory usage limit. If we are not, no need - * to subtract the slaves output buffers. We can just return ASAP. */ + * to subtract the replicas output buffers. We can just return ASAP. */ mem_reported = zmalloc_used_memory(); if (total) *total = mem_reported; @@ -405,7 +404,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev } if (mem_reported <= server.maxmemory && !level) return C_OK; - /* Remove the size of slaves output buffers and AOF buffer from the + /* Remove the size of replicas output buffers and AOF buffer from the * count of used memory. */ mem_used = mem_reported; size_t overhead = freeMemoryGetNotCountedMemory(); @@ -478,8 +477,8 @@ static int isSafeToPerformEvictions(void) { if (isInsideYieldingLongCommand() || server.loading) return 0; /* By default replicas should ignore maxmemory - * and just be masters exact copies. */ - if (server.masterhost && server.repl_slave_ignore_maxmemory) return 0; + * and just be primaries exact copies. */ + if (server.primary_host && server.repl_replica_ignore_maxmemory) return 0; /* If 'evict' action is paused, for whatever reason, then return false */ if (isPausedActionsWithUpdate(PAUSE_ACTION_EVICT)) return 0; @@ -539,7 +538,7 @@ int performEvictions(void) { long long mem_freed = 0; /* Maybe become negative */ mstime_t latency, eviction_latency; long long delta; - int slaves = listLength(server.slaves); + int replicas = listLength(server.replicas); int result = EVICT_FAIL; if (getMaxmemoryState(&mem_reported, NULL, &mem_tofree, NULL) == C_OK) { @@ -698,7 +697,7 @@ int performEvictions(void) { * start spending so much time here that is impossible to * deliver data to the replicas fast enough, so we force the * transmission here inside the loop. */ - if (slaves) flushSlavesOutputBuffers(); + if (replicas) flushReplicasOutputBuffers(); /* Normally our stop condition is the ability to release * a fixed, pre-computed amount of memory. 
However when we diff --git a/src/expire.c index 8d81473209..05abb9580a 100644 --- a/src/expire.c +++ b/src/expire.c @@ -368,21 +368,21 @@ void activeExpireCycle(int type) { } /*----------------------------------------------------------------------------- - * Expires of keys created in writable slaves + * Expires of keys created in writable replicas * - * Normally slaves do not process expires: they wait the masters to synthesize - * DEL operations in order to retain consistency. However writable slaves are - * an exception: if a key is created in the slave and an expire is assigned - * to it, we need a way to expire such a key, since the master does not know + * Normally replicas do not process expires: they wait for the primaries to synthesize + * DEL operations in order to retain consistency. However writable replicas are + * an exception: if a key is created in the replica and an expire is assigned + * to it, we need a way to expire such a key, since the primary does not know * anything about such a key. * - * In order to do so, we track keys created in the slave side with an expire - * set, and call the expireSlaveKeys() function from time to time in order to + * In order to do so, we track keys created in the replica side with an expire + * set, and call the expireReplicaKeys() function from time to time in order to * reclaim the keys if they already expired. * * Note that the use case we are trying to cover here, is a popular one where - * slaves are put in writable mode in order to compute slow operations in - * the slave side that are mostly useful to actually read data in a more + * replicas are put in writable mode in order to compute slow operations in + * the replica side that are mostly useful to actually read data in a more * processed way. Think at sets intersections in a tmp key, with an expire so * that it is also used as a cache to avoid intersecting every time. * @@ -391,9 +391,9 @@ void activeExpireCycle(int type) { *----------------------------------------------------------------------------*/ /* The dictionary where we remember key names and database ID of keys we may - * want to expire from the slave. Since this function is not often used we + * want to expire from the replica. Since this function is not often used we * don't even care to initialize the database at startup. We'll do it once - * the feature is used the first time, that is, when rememberSlaveKeyWithExpire() + * the feature is used the first time, that is, when rememberReplicaKeyWithExpire() * is called. * * The dictionary has an SDS string representing the key as the hash table * with a DB id > 63 are not expired, but a trivial fix is to set the bitmap * to the max 64 bit unsigned value when we know there is a key with a DB * ID greater than 63, and check all the configured DBs in such a case. */ -dict *slaveKeysWithExpire = NULL; +dict *replicaKeysWithExpire = NULL; -/* Check the set of keys created by the master with an expire set in order to +/* Check the set of keys created by the primary with an expire set in order to * check if they should be evicted. 
*/ -void expireSlaveKeys(void) { - if (slaveKeysWithExpire == NULL || dictSize(slaveKeysWithExpire) == 0) return; +void expireReplicaKeys(void) { + if (replicaKeysWithExpire == NULL || dictSize(replicaKeysWithExpire) == 0) return; int cycles = 0, noexpire = 0; mstime_t start = mstime(); while (1) { - dictEntry *de = dictGetRandomKey(slaveKeysWithExpire); + dictEntry *de = dictGetRandomKey(replicaKeysWithExpire); sds keyname = dictGetKey(de); uint64_t dbids = dictGetUnsignedIntegerVal(de); uint64_t new_dbids = 0; @@ -447,46 +447,45 @@ void expireSlaveKeys(void) { } /* Set the new bitmap as value of the key, in the dictionary - * of keys with an expire set directly in the writable slave. Otherwise + * of keys with an expire set directly in the writable replica. Otherwise * if the bitmap is zero, we no longer need to keep track of it. */ if (new_dbids) dictSetUnsignedIntegerVal(de, new_dbids); else - dictDelete(slaveKeysWithExpire, keyname); + dictDelete(replicaKeysWithExpire, keyname); /* Stop conditions: found 3 keys we can't expire in a row or * time limit was reached. */ cycles++; if (noexpire > 3) break; if ((cycles % 64) == 0 && mstime() - start > 1) break; - if (dictSize(slaveKeysWithExpire) == 0) break; + if (dictSize(replicaKeysWithExpire) == 0) break; } } /* Track keys that received an EXPIRE or similar command in the context - * of a writable slave. */ -void rememberSlaveKeyWithExpire(serverDb *db, robj *key) { - if (slaveKeysWithExpire == NULL) { + * of a writable replica. */ +void rememberReplicaKeyWithExpire(serverDb *db, robj *key) { + if (replicaKeysWithExpire == NULL) { static dictType dt = { dictSdsHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ NULL /* allow to expand */ }; - slaveKeysWithExpire = dictCreate(&dt); + replicaKeysWithExpire = dictCreate(&dt); } if (db->id > 63) return; - dictEntry *de = dictAddOrFind(slaveKeysWithExpire, key->ptr); + dictEntry *de = dictAddOrFind(replicaKeysWithExpire, key->ptr); /* If the entry was just created, set it to a copy of the SDS string * representing the key: we don't want to need to take those keys - * in sync with the main DB. The keys will be removed by expireSlaveKeys() + * in sync with the main DB. The keys will be removed by expireReplicaKeys() * as it scans to find keys to remove. */ if (dictGetKey(de) == key->ptr) { - dictSetKey(slaveKeysWithExpire, de, sdsdup(key->ptr)); + dictSetKey(replicaKeysWithExpire, de, sdsdup(key->ptr)); dictSetUnsignedIntegerVal(de, 0); } @@ -496,34 +495,34 @@ void rememberSlaveKeyWithExpire(serverDb *db, robj *key) { } /* Return the number of keys we are tracking. */ -size_t getSlaveKeyWithExpireCount(void) { - if (slaveKeysWithExpire == NULL) return 0; - return dictSize(slaveKeysWithExpire); +size_t getReplicaKeyWithExpireCount(void) { + if (replicaKeysWithExpire == NULL) return 0; + return dictSize(replicaKeysWithExpire); } /* Remove the keys in the hash table. We need to do that when data is - * flushed from the server. We may receive new keys from the master with + * flushed from the server. We may receive new keys from the primary with * the same name/db and it is no longer a good idea to expire them. 
* * Note: technically we should handle the case of a single DB being flushed * but it is not worth it since anyway race conditions using the same set - * of key names in a writable slave and in its master will lead to + * of key names in a writable replica and in its primary will lead to * inconsistencies. This is just a best-effort thing we do. */ -void flushSlaveKeysWithExpireList(void) { - if (slaveKeysWithExpire) { - dictRelease(slaveKeysWithExpire); - slaveKeysWithExpire = NULL; +void flushReplicaKeysWithExpireList(void) { + if (replicaKeysWithExpire) { + dictRelease(replicaKeysWithExpire); + replicaKeysWithExpire = NULL; } } int checkAlreadyExpired(long long when) { /* EXPIRE with negative TTL, or EXPIREAT with a timestamp into the past * should never be executed as a DEL when load the AOF or in the context - * of a slave instance. + * of a replica instance. * * Instead we add the already expired key to the database with expire time - * (possibly in the past) and wait for an explicit DEL from the master. */ - return (when <= commandTimeSnapshot() && !server.loading && !server.masterhost); + * (possibly in the past) and wait for an explicit DEL from the primary. */ + return (when <= commandTimeSnapshot() && !server.loading && !server.primary_host); } #define EXPIRE_NX (1 << 0) diff --git a/src/function_lua.c b/src/function_lua.c index 54453a8f35..685485e37e 100644 --- a/src/function_lua.c +++ b/src/function_lua.c @@ -274,7 +274,7 @@ static int luaRegisterFunctionReadNamedArgs(lua_State *lua, registerFunctionArgs luaFunctionCtx *lua_f_ctx = NULL; uint64_t flags = 0; if (!lua_istable(lua, 1)) { - err = "calling redis.register_function with a single argument is only applicable to Lua table (representing " + err = "calling server.register_function with a single argument is only applicable to Lua table (representing " "named arguments)."; goto error; } @@ -284,23 +284,23 @@ static int luaRegisterFunctionReadNamedArgs(lua_State *lua, registerFunctionArgs while (lua_next(lua, -2)) { /* Stack now: table, key, value */ if (!lua_isstring(lua, -2)) { - err = "named argument key given to redis.register_function is not a string"; + err = "named argument key given to server.register_function is not a string"; goto error; } const char *key = lua_tostring(lua, -2); if (!strcasecmp(key, "function_name")) { if (!(name = luaGetStringSds(lua, -1))) { - err = "function_name argument given to redis.register_function must be a string"; + err = "function_name argument given to server.register_function must be a string"; goto error; } } else if (!strcasecmp(key, "description")) { if (!(desc = luaGetStringSds(lua, -1))) { - err = "description argument given to redis.register_function must be a string"; + err = "description argument given to server.register_function must be a string"; goto error; } } else if (!strcasecmp(key, "callback")) { if (!lua_isfunction(lua, -1)) { - err = "callback argument given to redis.register_function must be a function"; + err = "callback argument given to server.register_function must be a function"; goto error; } int lua_function_ref = luaL_ref(lua, LUA_REGISTRYINDEX); @@ -310,7 +310,7 @@ static int luaRegisterFunctionReadNamedArgs(lua_State *lua, registerFunctionArgs continue; /* value was already popped, so no need to pop it out. 
*/ } else if (!strcasecmp(key, "flags")) { if (!lua_istable(lua, -1)) { - err = "flags argument to redis.register_function must be a table representing function flags"; + err = "flags argument to server.register_function must be a table representing function flags"; goto error; } if (luaRegisterFunctionReadFlags(lua, &flags) != C_OK) { @@ -319,19 +319,19 @@ static int luaRegisterFunctionReadNamedArgs(lua_State *lua, registerFunctionArgs } } else { /* unknown argument was given, raise an error */ - err = "unknown argument given to redis.register_function"; + err = "unknown argument given to server.register_function"; goto error; } lua_pop(lua, 1); /* pop the value to continue the iteration */ } if (!name) { - err = "redis.register_function must get a function name argument"; + err = "server.register_function must get a function name argument"; goto error; } if (!lua_f_ctx) { - err = "redis.register_function must get a callback argument"; + err = "server.register_function must get a callback argument"; goto error; } @@ -355,12 +355,12 @@ static int luaRegisterFunctionReadPositionalArgs(lua_State *lua, registerFunctio sds name = NULL; luaFunctionCtx *lua_f_ctx = NULL; if (!(name = luaGetStringSds(lua, 1))) { - err = "first argument to redis.register_function must be a string"; + err = "first argument to server.register_function must be a string"; goto error; } if (!lua_isfunction(lua, 2)) { - err = "second argument to redis.register_function must be a function"; + err = "second argument to server.register_function must be a function"; goto error; } @@ -382,7 +382,7 @@ static int luaRegisterFunctionReadPositionalArgs(lua_State *lua, registerFunctio static int luaRegisterFunctionReadArgs(lua_State *lua, registerFunctionArgs *register_f_args) { int argc = lua_gettop(lua); if (argc < 1 || argc > 2) { - luaPushError(lua, "wrong number of arguments to redis.register_function"); + luaPushError(lua, "wrong number of arguments to server.register_function"); return C_ERR; } @@ -398,7 +398,7 @@ static int luaRegisterFunction(lua_State *lua) { loadCtx *load_ctx = luaGetFromRegistry(lua, REGISTRY_LOAD_CTX_NAME); if (!load_ctx) { - luaPushError(lua, "redis.register_function can only be called on FUNCTION LOAD command"); + luaPushError(lua, "server.register_function can only be called on FUNCTION LOAD command"); return luaError(lua); } @@ -423,7 +423,7 @@ int luaEngineInitEngine(void) { luaEngineCtx *lua_engine_ctx = zmalloc(sizeof(*lua_engine_ctx)); lua_engine_ctx->lua = lua_open(); - luaRegisterRedisAPI(lua_engine_ctx->lua); + luaRegisterServerAPI(lua_engine_ctx->lua); /* Register the library commands table and fields and store it to registry */ lua_newtable(lua_engine_ctx->lua); /* load library globals */ diff --git a/src/functions.c b/src/functions.c index 76e40c5231..08d869f026 100644 --- a/src/functions.c +++ b/src/functions.c @@ -31,7 +31,6 @@ #include "sds.h" #include "dict.h" #include "adlist.h" -#include "atomicvar.h" #define LOAD_TIMEOUT_MS 500 @@ -66,7 +65,6 @@ typedef struct functionsLibMetaData { dictType engineDictType = { dictSdsCaseHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -76,7 +74,6 @@ dictType engineDictType = { dictType functionDictType = { dictSdsCaseHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL, /* val destructor */ @@ -86,7 +83,6 
@@ dictType functionDictType = { dictType engineStatsDictType = { dictSdsCaseHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ engineStatsDispose, /* val destructor */ @@ -96,7 +92,6 @@ dictType engineStatsDictType = { dictType libraryFunctionDictType = { dictSdsHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ engineFunctionDispose, /* val destructor */ @@ -106,7 +101,6 @@ dictType libraryFunctionDictType = { dictType librariesDictType = { dictSdsHash, /* hash function */ dictSdsDup, /* key dup */ - NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ engineLibraryDispose, /* val destructor */ diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 0fb30d9bda..f9bce26634 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -903,8 +903,8 @@ int hllSparseSet(robj *o, long index, uint8_t count) { * convert from sparse to dense a register requires to be updated. * * Note that this in turn means that PFADD will make sure the command - * is propagated to slaves / AOF, so if there is a sparse -> dense - * conversion, it will be performed in all the slaves as well. */ + * is propagated to replicas / AOF, so if there is a sparse -> dense + * conversion, it will be performed in all the replicas as well. */ int dense_retval = hllDenseSet(hdr->registers, index, count); serverAssert(dense_retval == 1); return dense_retval; diff --git a/src/kvstore.c b/src/kvstore.c index 70e2043157..a43b72e1e1 100644 --- a/src/kvstore.c +++ b/src/kvstore.c @@ -48,6 +48,8 @@ #define UNUSED(V) ((void)V) +static dict *kvstoreIteratorNextDict(kvstoreIterator *kvs_it); + struct _kvstore { int flags; dictType dtype; @@ -572,7 +574,7 @@ void kvstoreIteratorRelease(kvstoreIterator *kvs_it) { } /* Returns next dictionary from the iterator, or NULL if iteration is complete. */ -dict *kvstoreIteratorNextDict(kvstoreIterator *kvs_it) { +static dict *kvstoreIteratorNextDict(kvstoreIterator *kvs_it) { if (kvs_it->next_didx == -1) return NULL; /* The dict may be deleted during the iteration process, so here need to check for NULL. */ @@ -600,13 +602,6 @@ dictEntry *kvstoreIteratorNext(kvstoreIterator *kvs_it) { if (!de) { /* No current dict or reached the end of the dictionary. */ dict *d = kvstoreIteratorNextDict(kvs_it); if (!d) return NULL; - if (kvs_it->di.d) { - /* Before we move to the next dict, reset the iter of the previous dict. */ - dictIterator *iter = &kvs_it->di; - dictResetIterator(iter); - /* In the safe iterator context, we may delete entries. 
*/ - freeDictIfNeeded(kvs_it->kvs, kvs_it->didx); - } dictInitSafeIterator(&kvs_it->di, d); de = dictNext(&kvs_it->di); } @@ -794,8 +789,9 @@ void kvstoreDictSetKey(kvstore *kvs, int didx, dictEntry *de, void *key) { } void kvstoreDictSetVal(kvstore *kvs, int didx, dictEntry *de, void *val) { - dict *d = kvstoreGetDict(kvs, didx); - dictSetVal(d, de, val); + UNUSED(kvs); + UNUSED(didx); + dictSetVal(NULL, de, val); } dictEntry * diff --git a/src/kvstore.h b/src/kvstore.h index d3c5949d1f..e7e21f8aa9 100644 --- a/src/kvstore.h +++ b/src/kvstore.h @@ -40,7 +40,6 @@ uint64_t kvstoreGetHash(kvstore *kvs, const void *key); /* kvstore iterator specific functions */ kvstoreIterator *kvstoreIteratorInit(kvstore *kvs); void kvstoreIteratorRelease(kvstoreIterator *kvs_it); -dict *kvstoreIteratorNextDict(kvstoreIterator *kvs_it); int kvstoreIteratorGetCurrentDictIndex(kvstoreIterator *kvs_it); dictEntry *kvstoreIteratorNext(kvstoreIterator *kvs_it); diff --git a/src/latency.c b/src/latency.c index 78f3cc3edd..94840379bd 100644 --- a/src/latency.c +++ b/src/latency.c @@ -46,12 +46,9 @@ uint64_t dictStringHash(const void *key) { return dictGenHashFunction(key, strlen(key)); } -void dictVanillaFree(dict *d, void *val); - dictType latencyTimeSeriesDictType = { dictStringHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictStringKeyCompare, /* key compare */ dictVanillaFree, /* key destructor */ dictVanillaFree, /* val destructor */ diff --git a/src/lazyfree.c b/src/lazyfree.c index f9811f0e64..38ccd913bd 100644 --- a/src/lazyfree.c +++ b/src/lazyfree.c @@ -1,19 +1,20 @@ #include "server.h" #include "bio.h" -#include "atomicvar.h" #include "functions.h" #include "cluster.h" -static serverAtomic size_t lazyfree_objects = 0; -static serverAtomic size_t lazyfreed_objects = 0; +#include + +static _Atomic size_t lazyfree_objects = 0; +static _Atomic size_t lazyfreed_objects = 0; /* Release objects from the lazyfree thread. It's just decrRefCount() * updating the count of objects to release. */ void lazyfreeFreeObject(void *args[]) { robj *o = (robj *)args[0]; decrRefCount(o); - atomicDecr(lazyfree_objects, 1); - atomicIncr(lazyfreed_objects, 1); + atomic_fetch_sub_explicit(&lazyfree_objects, 1, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, 1, memory_order_relaxed); } /* Release a database from the lazyfree thread. The 'db' pointer is the @@ -26,8 +27,8 @@ void lazyfreeFreeDatabase(void *args[]) { size_t numkeys = kvstoreSize(da1); kvstoreRelease(da1); kvstoreRelease(da2); - atomicDecr(lazyfree_objects, numkeys); - atomicIncr(lazyfreed_objects, numkeys); + atomic_fetch_sub_explicit(&lazyfree_objects, numkeys, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, numkeys, memory_order_relaxed); } /* Release the key tracking table. */ @@ -35,8 +36,8 @@ void lazyFreeTrackingTable(void *args[]) { rax *rt = args[0]; size_t len = rt->numele; freeTrackingRadixTree(rt); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Release the error stats rax tree. 
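With kvstoreIteratorNextDict() made static in the kvstore.c and kvstore.h hunks above, per-dict cursoring becomes an internal detail and callers go through the public iterator API only. A minimal usage sketch, given an existing kvstore *kvs:

    /* Sketch: walk every entry of a kvstore through the public API. */
    kvstoreIterator *kvs_it = kvstoreIteratorInit(kvs);
    dictEntry *de;
    while ((de = kvstoreIteratorNext(kvs_it)) != NULL) {
        void *key = dictGetKey(de);
        (void)key; /* process the entry */
    }
    kvstoreIteratorRelease(kvs_it);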
*/ @@ -44,8 +45,8 @@ void lazyFreeErrors(void *args[]) { rax *errors = args[0]; size_t len = errors->numele; raxFreeWithCallback(errors, zfree); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Release the lua_scripts dict. */ @@ -55,8 +56,8 @@ void lazyFreeLuaScripts(void *args[]) { lua_State *lua = args[2]; long long len = dictSize(lua_scripts); freeLuaScriptsSync(lua_scripts, lua_scripts_lru_list, lua); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Release the functions ctx. */ @@ -64,8 +65,8 @@ void lazyFreeFunctionsCtx(void *args[]) { functionsLibCtx *functions_lib_ctx = args[0]; size_t len = functionsLibCtxFunctionsLen(functions_lib_ctx); functionsLibCtxFree(functions_lib_ctx); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Release replication backlog referencing memory. */ @@ -76,26 +77,24 @@ void lazyFreeReplicationBacklogRefMem(void *args[]) { len += raxSize(index); listRelease(blocks); raxFree(index); - atomicDecr(lazyfree_objects, len); - atomicIncr(lazyfreed_objects, len); + atomic_fetch_sub_explicit(&lazyfree_objects, len, memory_order_relaxed); + atomic_fetch_add_explicit(&lazyfreed_objects, len, memory_order_relaxed); } /* Return the number of currently pending objects to free. */ size_t lazyfreeGetPendingObjectsCount(void) { - size_t aux; - atomicGet(lazyfree_objects, aux); + size_t aux = atomic_load_explicit(&lazyfree_objects, memory_order_relaxed); return aux; } /* Return the number of objects that have been freed. */ size_t lazyfreeGetFreedObjectsCount(void) { - size_t aux; - atomicGet(lazyfreed_objects, aux); + size_t aux = atomic_load_explicit(&lazyfreed_objects, memory_order_relaxed); return aux; } void lazyfreeResetStats(void) { - atomicSet(lazyfreed_objects, 0); + atomic_store_explicit(&lazyfreed_objects, 0, memory_order_relaxed); } /* Return the amount of work needed in order to free an object. @@ -175,7 +174,7 @@ void freeObjAsync(robj *key, robj *obj, int dbid) { * of parts of the server core may call incrRefCount() to protect * objects, and then call dbDelete(). */ if (free_effort > LAZYFREE_THRESHOLD && obj->refcount == 1) { - atomicIncr(lazyfree_objects, 1); + atomic_fetch_add_explicit(&lazyfree_objects, 1, memory_order_relaxed); bioCreateLazyFreeJob(lazyfreeFreeObject, 1, obj); } else { decrRefCount(obj); @@ -195,7 +194,7 @@ void emptyDbAsync(serverDb *db) { kvstore *oldkeys = db->keys, *oldexpires = db->expires; db->keys = kvstoreCreate(&dbDictType, slot_count_bits, flags); db->expires = kvstoreCreate(&dbExpiresDictType, slot_count_bits, flags); - atomicIncr(lazyfree_objects, kvstoreSize(oldkeys)); + atomic_fetch_add_explicit(&lazyfree_objects, kvstoreSize(oldkeys), memory_order_relaxed); bioCreateLazyFreeJob(lazyfreeFreeDatabase, 2, oldkeys, oldexpires); } @@ -204,7 +203,7 @@ void emptyDbAsync(serverDb *db) { void freeTrackingRadixTreeAsync(rax *tracking) { /* Because this rax has only keys and no values so we use numnodes. 
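The lazyfree.c hunks above swap the project-local atomicvar macros for C11 atomics; the recurring pattern is a file-scope _Atomic counter updated with relaxed ordering, which is sufficient for plain statistics. A self-contained sketch of the same idiom:

    /* Sketch of the stdatomic idiom used for the lazyfree counters. */
    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic size_t pending_jobs = 0;

    static void job_submitted(void)  { atomic_fetch_add_explicit(&pending_jobs, 1, memory_order_relaxed); }
    static void job_completed(void)  { atomic_fetch_sub_explicit(&pending_jobs, 1, memory_order_relaxed); }
    static size_t jobs_pending(void) { return atomic_load_explicit(&pending_jobs, memory_order_relaxed); }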
*/ if (tracking->numnodes > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, tracking->numele); + atomic_fetch_add_explicit(&lazyfree_objects, tracking->numele, memory_order_relaxed); bioCreateLazyFreeJob(lazyFreeTrackingTable, 1, tracking); } else { freeTrackingRadixTree(tracking); @@ -216,7 +215,7 @@ void freeTrackingRadixTreeAsync(rax *tracking) { void freeErrorsRadixTreeAsync(rax *errors) { /* Because this rax has only keys and no values so we use numnodes. */ if (errors->numnodes > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, errors->numele); + atomic_fetch_add_explicit(&lazyfree_objects, errors->numele, memory_order_relaxed); bioCreateLazyFreeJob(lazyFreeErrors, 1, errors); } else { raxFreeWithCallback(errors, zfree); @@ -227,7 +226,7 @@ void freeErrorsRadixTreeAsync(rax *errors) { * Close lua interpreter, if there are a lot of lua scripts, close it in async way. */ void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_State *lua) { if (dictSize(lua_scripts) > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, dictSize(lua_scripts)); + atomic_fetch_add_explicit(&lazyfree_objects, dictSize(lua_scripts), memory_order_relaxed); bioCreateLazyFreeJob(lazyFreeLuaScripts, 3, lua_scripts, lua_scripts_lru_list, lua); } else { freeLuaScriptsSync(lua_scripts, lua_scripts_lru_list, lua); @@ -237,7 +236,8 @@ void freeLuaScriptsAsync(dict *lua_scripts, list *lua_scripts_lru_list, lua_Stat /* Free functions ctx, if the functions ctx contains enough functions, free it in async way. */ void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx) { if (functionsLibCtxFunctionsLen(functions_lib_ctx) > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, functionsLibCtxFunctionsLen(functions_lib_ctx)); + atomic_fetch_add_explicit(&lazyfree_objects, functionsLibCtxFunctionsLen(functions_lib_ctx), + memory_order_relaxed); bioCreateLazyFreeJob(lazyFreeFunctionsCtx, 1, functions_lib_ctx); } else { functionsLibCtxFree(functions_lib_ctx); @@ -247,7 +247,7 @@ void freeFunctionsAsync(functionsLibCtx *functions_lib_ctx) { /* Free replication backlog referencing buffer blocks and rax index. */ void freeReplicationBacklogRefMemAsync(list *blocks, rax *index) { if (listLength(blocks) > LAZYFREE_THRESHOLD || raxSize(index) > LAZYFREE_THRESHOLD) { - atomicIncr(lazyfree_objects, listLength(blocks) + raxSize(index)); + atomic_fetch_add_explicit(&lazyfree_objects, listLength(blocks) + raxSize(index), memory_order_relaxed); bioCreateLazyFreeJob(lazyFreeReplicationBacklogRefMem, 2, blocks, index); } else { listRelease(blocks); diff --git a/src/listpack.c b/src/listpack.c index baa6f98be3..be970e1e64 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -427,19 +427,17 @@ static inline void lpEncodeString(unsigned char *buf, unsigned char *s, uint32_t * lpCurrentEncodedSizeBytes or ASSERT_INTEGRITY_LEN (possibly since 'p' is * a return value of another function that validated its return. 
*/ static inline uint32_t lpCurrentEncodedSizeUnsafe(unsigned char *p) { - /* clang-format off */ if (LP_ENCODING_IS_7BIT_UINT(p[0])) return 1; - if (LP_ENCODING_IS_6BIT_STR(p[0])) return 1+LP_ENCODING_6BIT_STR_LEN(p); + if (LP_ENCODING_IS_6BIT_STR(p[0])) return 1 + LP_ENCODING_6BIT_STR_LEN(p); if (LP_ENCODING_IS_13BIT_INT(p[0])) return 2; if (LP_ENCODING_IS_16BIT_INT(p[0])) return 3; if (LP_ENCODING_IS_24BIT_INT(p[0])) return 4; if (LP_ENCODING_IS_32BIT_INT(p[0])) return 5; if (LP_ENCODING_IS_64BIT_INT(p[0])) return 9; - if (LP_ENCODING_IS_12BIT_STR(p[0])) return 2+LP_ENCODING_12BIT_STR_LEN(p); - if (LP_ENCODING_IS_32BIT_STR(p[0])) return 5+LP_ENCODING_32BIT_STR_LEN(p); + if (LP_ENCODING_IS_12BIT_STR(p[0])) return 2 + LP_ENCODING_12BIT_STR_LEN(p); + if (LP_ENCODING_IS_32BIT_STR(p[0])) return 5 + LP_ENCODING_32BIT_STR_LEN(p); if (p[0] == LP_EOF) return 1; return 0; - /* clang-format on */ } /* Return bytes needed to encode the length of the listpack element pointed by 'p'. @@ -447,7 +445,6 @@ static inline uint32_t lpCurrentEncodedSizeUnsafe(unsigned char *p) { * of the element (excluding the element data itself) * If the element encoding is wrong then 0 is returned. */ static inline uint32_t lpCurrentEncodedSizeBytes(unsigned char *p) { - /* clang-format off */ if (LP_ENCODING_IS_7BIT_UINT(p[0])) return 1; if (LP_ENCODING_IS_6BIT_STR(p[0])) return 1; if (LP_ENCODING_IS_13BIT_INT(p[0])) return 1; @@ -459,7 +456,6 @@ static inline uint32_t lpCurrentEncodedSizeBytes(unsigned char *p) { if (LP_ENCODING_IS_32BIT_STR(p[0])) return 5; if (p[0] == LP_EOF) return 1; return 0; - /* clang-format on */ } /* Skip the current entry returning the next. It is invalid to call this @@ -781,12 +777,12 @@ unsigned char *lpInsert(unsigned char *lp, unsigned char backlen[LP_MAX_BACKLEN_SIZE]; uint64_t enclen; /* The length of the encoded element. */ - int delete = (elestr == NULL && eleint == NULL); + int del_ele = (elestr == NULL && eleint == NULL); /* when deletion, it is conceptually replacing the element with a * zero-length element. So whatever we get passed as 'where', set * it to LP_REPLACE. */ - if (delete) where = LP_REPLACE; + if (del_ele) where = LP_REPLACE; /* If we need to insert after the current element, we just jump to the * next element (that could be the EOF one) and handle the case of @@ -825,7 +821,7 @@ unsigned char *lpInsert(unsigned char *lp, /* We need to also encode the backward-parsable length of the element * and append it to the end: this allows to traverse the listpack from * the end to the start. */ - unsigned long backlen_size = (!delete) ? lpEncodeBacklen(backlen, enclen) : 0; + unsigned long backlen_size = (!del_ele) ? lpEncodeBacklen(backlen, enclen) : 0; uint64_t old_listpack_bytes = lpGetTotalBytes(lp); uint32_t replaced_len = 0; if (where == LP_REPLACE) { @@ -870,9 +866,9 @@ unsigned char *lpInsert(unsigned char *lp, *newp = dst; /* In case of deletion, set 'newp' to NULL if the next element is * the EOF element. */ - if (delete && dst[0] == LP_EOF) *newp = NULL; + if (del_ele && dst[0] == LP_EOF) *newp = NULL; } - if (!delete) { + if (!del_ele) { if (enctype == LP_ENCODING_INT) { memcpy(dst, eleint, enclen); } else if (elestr) { @@ -886,10 +882,10 @@ unsigned char *lpInsert(unsigned char *lp, } /* Update header. 
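The del_ele rename above is purely cosmetic: deletion in lpInsert() is still expressed as replacing an element with nothing. A sketch of how a delete helper drives it, assuming the upstream parameter order (element string, element integer, size, position, where, newp):

    /* Sketch: NULL for both element pointers selects the del_ele path,
     * i.e. "replace the element at p with a zero-length element". */
    unsigned char *lpDeleteSketch(unsigned char *lp, unsigned char *p, unsigned char **newp) {
        return lpInsert(lp, NULL, NULL, 0, p, LP_REPLACE, newp);
    }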
*/ - if (where != LP_REPLACE || delete) { + if (where != LP_REPLACE || del_ele) { uint32_t num_elements = lpGetNumElements(lp); if (num_elements != LP_HDR_NUMELE_UNKNOWN) { - if (!delete) + if (!del_ele) lpSetNumElements(lp, num_elements + 1); else lpSetNumElements(lp, num_elements - 1); diff --git a/src/logreqres.c b/src/logreqres.c index af4021afb1..70b4e55f6f 100644 --- a/src/logreqres.c +++ b/src/logreqres.c @@ -78,10 +78,10 @@ static int reqresShouldLog(client *c) { if (!server.req_res_logfile) return 0; /* Ignore client with streaming non-standard response */ - if (c->flags & (CLIENT_PUBSUB | CLIENT_MONITOR | CLIENT_SLAVE)) return 0; + if (c->flags & (CLIENT_PUBSUB | CLIENT_MONITOR | CLIENT_REPLICA)) return 0; - /* We only work on masters (didn't implement reqresAppendResponse to work on shared slave buffers) */ - if (getClientType(c) == CLIENT_TYPE_MASTER) return 0; + /* We only work on primaries (didn't implement reqresAppendResponse to work on shared replica buffers) */ + if (getClientType(c) == CLIENT_TYPE_PRIMARY) return 0; return 1; } diff --git a/src/module.c b/src/module.c index f37d3a8302..f149443c80 100644 --- a/src/module.c +++ b/src/module.c @@ -1220,7 +1220,7 @@ ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, * Starting from Redis OSS 7.0 this flag has been deprecated. * Declaring a command as "random" can be done using * command tips, see https://valkey.io/topics/command-tips. - * * **"allow-stale"**: The command is allowed to run on slaves that don't + * * **"allow-stale"**: The command is allowed to run on replicas that don't * serve stale data. Don't use if you don't know what * this means. * * **"no-monitor"**: Don't propagate the command on monitor. Use this if @@ -2413,8 +2413,7 @@ void VM_Yield(ValkeyModuleCtx *ctx, int flags, const char *busy_reply) { * after the main thread enters acquiring GIL state in order to protect the event * loop (ae.c) and avoid potential race conditions. */ - int acquiring; - atomicGet(server.module_gil_acquring, acquiring); + int acquiring = atomic_load_explicit(&server.module_gil_acquiring, memory_order_relaxed); if (!acquiring) { /* If the main thread has not yet entered the acquiring GIL state, * we attempt to wake it up and exit without waiting for it to @@ -3492,7 +3491,7 @@ int VM_ReplyWithLongDouble(ValkeyModuleCtx *ctx, long double ld) { * ## Commands replication API * -------------------------------------------------------------------------- */ -/* Replicate the specified command and arguments to slaves and AOF, as effect +/* Replicate the specified command and arguments to replicas and AOF, as effect * of execution of the calling command implementation. * * The replicated commands are always wrapped into the MULTI/EXEC that @@ -3566,7 +3565,7 @@ int VM_Replicate(ValkeyModuleCtx *ctx, const char *cmdname, const char *fmt, ... * commands. * * Basically this form of replication is useful when you want to propagate - * the command to the slaves and AOF file exactly as it was called, since + * the command to the replicas and AOF file exactly as it was called, since * the command can just be re-executed to deterministically re-create the * new state starting from the old one. * @@ -3665,12 +3664,12 @@ int modulePopulateReplicationInfoStructure(void *ri, int structver) { ValkeyModuleReplicationInfoV1 *ri1 = ri; memset(ri1, 0, sizeof(*ri1)); ri1->version = structver; - ri1->master = server.masterhost == NULL; - ri1->masterhost = server.masterhost ? 
server.masterhost : ""; - ri1->masterport = server.masterport; + ri1->primary = server.primary_host == NULL; + ri1->primary_host = server.primary_host ? server.primary_host : ""; + ri1->primary_port = server.primary_port; ri1->replid1 = server.replid; ri1->replid2 = server.replid2; - ri1->repl1_offset = server.master_repl_offset; + ri1->repl1_offset = server.primary_repl_offset; ri1->repl2_offset = server.second_replid_offset; return VALKEYMODULE_OK; } @@ -3795,7 +3794,7 @@ int VM_GetSelectedDb(ValkeyModuleCtx *ctx) { * * VALKEYMODULE_CTX_FLAGS_MULTI: The command is running inside a transaction * * * VALKEYMODULE_CTX_FLAGS_REPLICATED: The command was sent over the replication - * link by the MASTER + * link by the PRIMARY * * * VALKEYMODULE_CTX_FLAGS_PRIMARY: The instance is a primary * @@ -3822,16 +3821,16 @@ int VM_GetSelectedDb(ValkeyModuleCtx *ctx) { * * * VALKEYMODULE_CTX_FLAGS_LOADING: Server is loading RDB/AOF * - * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_STALE: No active link with the master. + * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_STALE: No active link with the primary. * * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_CONNECTING: The replica is trying to - * connect with the master. + * connect with the primary. * - * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_TRANSFERRING: Master -> Replica RDB + * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_TRANSFERRING: primary -> Replica RDB * transfer is in progress. * * * VALKEYMODULE_CTX_FLAGS_REPLICA_IS_ONLINE: The replica has an active link - * with its master. This is the + * with its primary. This is the * contrary of STALE state. * * * VALKEYMODULE_CTX_FLAGS_ACTIVE_CHILD: There is currently some background @@ -3855,8 +3854,8 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { if (ctx) { if (ctx->client) { if (ctx->client->flags & CLIENT_DENY_BLOCKING) flags |= VALKEYMODULE_CTX_FLAGS_DENY_BLOCKING; - /* Module command received from MASTER, is replicated. */ - if (ctx->client->flags & CLIENT_MASTER) flags |= VALKEYMODULE_CTX_FLAGS_REPLICATED; + /* Module command received from PRIMARY, is replicated. */ + if (ctx->client->flags & CLIENT_PRIMARY) flags |= VALKEYMODULE_CTX_FLAGS_REPLICATED; if (ctx->client->resp == 3) { flags |= VALKEYMODULE_CTX_FLAGS_RESP3; } @@ -3881,7 +3880,7 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { flags |= VALKEYMODULE_CTX_FLAGS_LOADING; /* Maxmemory and eviction policy */ - if (server.maxmemory > 0 && (!server.masterhost || !server.repl_slave_ignore_maxmemory)) { + if (server.maxmemory > 0 && (!server.primary_host || !server.repl_replica_ignore_maxmemory)) { flags |= VALKEYMODULE_CTX_FLAGS_MAXMEMORY; if (server.maxmemory_policy != MAXMEMORY_NO_EVICTION) flags |= VALKEYMODULE_CTX_FLAGS_EVICT; @@ -3892,11 +3891,11 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { if (server.saveparamslen > 0) flags |= VALKEYMODULE_CTX_FLAGS_RDB; /* Replication flags */ - if (server.masterhost == NULL) { + if (server.primary_host == NULL) { flags |= VALKEYMODULE_CTX_FLAGS_PRIMARY; } else { flags |= VALKEYMODULE_CTX_FLAGS_REPLICA; - if (server.repl_slave_ro) flags |= VALKEYMODULE_CTX_FLAGS_READONLY; + if (server.repl_replica_ro) flags |= VALKEYMODULE_CTX_FLAGS_READONLY; /* Replica state flags. */ if (server.repl_state == REPL_STATE_CONNECT || server.repl_state == REPL_STATE_CONNECTING) { @@ -3928,16 +3927,16 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { /* Returns true if a client sent the CLIENT PAUSE command to the server or * if the Cluster does a manual failover, pausing the clients. 
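For module authors, the renamed context flags above are consumed as before, by testing bits of the value returned from the context-flags call. A minimal command-handler sketch using only names visible in this diff:

    /* Sketch: report the replication role from inside a module command. */
    int ReportRole_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
        VALKEYMODULE_NOT_USED(argv);
        VALKEYMODULE_NOT_USED(argc);
        int flags = ValkeyModule_GetContextFlags(ctx);
        if (flags & VALKEYMODULE_CTX_FLAGS_PRIMARY)
            return ValkeyModule_ReplyWithSimpleString(ctx, "primary");
        if (flags & VALKEYMODULE_CTX_FLAGS_READONLY)
            return ValkeyModule_ReplyWithSimpleString(ctx, "read-only replica");
        return ValkeyModule_ReplyWithSimpleString(ctx, "replica");
    }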
- * This is needed when we have a master with replicas, and want to write, + * This is needed when we have a primary with replicas, and want to write, * without adding further data to the replication channel, that the replicas - * replication offset, match the one of the master. When this happens, it is - * safe to failover the master without data loss. + * replication offset, match the one of the primary. When this happens, it is + * safe to failover the primary without data loss. * * However modules may generate traffic by calling ValkeyModule_Call() with * the "!" flag, or by calling ValkeyModule_Replicate(), in a context outside * commands execution, for instance in timeout callbacks, threads safe * contexts, and so forth. When modules will generate too much traffic, it - * will be hard for the master and replicas offset to match, because there + * will be hard for the primary and replicas offset to match, because there * is more data to send in the replication channel. * * So modules may want to try to avoid very heavy background work that has @@ -6370,21 +6369,21 @@ ValkeyModuleCallReply *VM_Call(ValkeyModuleCtx *ctx, const char *cmdname, const goto cleanup; } - if (server.masterhost && server.repl_slave_ro && !obey_client) { + if (server.primary_host && server.repl_replica_ro && !obey_client) { errno = ESPIPE; if (error_as_call_replies) { - sds msg = sdsdup(shared.roslaveerr->ptr); + sds msg = sdsdup(shared.roreplicaerr->ptr); reply = callReplyCreateError(msg, ctx); } goto cleanup; } } - if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED && server.repl_serve_stale_data == 0 && + if (server.primary_host && server.repl_state != REPL_STATE_CONNECTED && server.repl_serve_stale_data == 0 && !(cmd_flags & CMD_STALE)) { errno = ESPIPE; if (error_as_call_replies) { - sds msg = sdsdup(shared.masterdownerr->ptr); + sds msg = sdsdup(shared.primarydownerr->ptr); reply = callReplyCreateError(msg, ctx); } goto cleanup; @@ -6419,7 +6418,7 @@ ValkeyModuleCallReply *VM_Call(ValkeyModuleCtx *ctx, const char *cmdname, const /* If this is a Cluster node, we need to make sure the module is not * trying to access non-local keys, with the exception of commands - * received from our master. */ + * received from our primary. */ if (server.cluster_enabled && !mustObeyClient(ctx->client)) { int error_code; /* Duplicate relevant flags in the module client. */ @@ -7918,7 +7917,7 @@ int checkModuleAuthentication(client *c, robj *username, robj *password, robj ** } if (c->flags & CLIENT_MODULE_AUTH_HAS_RESULT) { c->flags &= ~CLIENT_MODULE_AUTH_HAS_RESULT; - if (c->authenticated) return AUTH_OK; + if (c->flags & CLIENT_AUTHENTICATED) return AUTH_OK; } return AUTH_ERR; } @@ -8294,7 +8293,7 @@ void moduleHandleBlockedClients(void) { /* Update the wait offset, we don't know if this blocked client propagated anything, * currently we rather not add any API for that, so we just assume it did. */ - c->woff = server.master_repl_offset; + c->woff = server.primary_repl_offset; /* Put the client in the list of clients that need to write * if there are pending replies here. 
This is needed since @@ -8688,7 +8687,7 @@ int VM_AddPostNotificationJob(ValkeyModuleCtx *ctx, ValkeyModulePostNotificationJobFunc callback, void *privdata, void (*free_privdata)(void *)) { - if (server.loading || (server.masterhost && server.repl_slave_ro)) { + if (server.loading || (server.primary_host && server.repl_replica_ro)) { return VALKEYMODULE_ERR; } ValkeyModulePostExecUnitJob *job = zmalloc(sizeof(*job)); @@ -8813,7 +8812,7 @@ typedef struct moduleClusterNodeInfo { int flags; char ip[NET_IP_STR_LEN]; int port; - char master_id[40]; /* Only if flags & VALKEYMODULE_NODE_PRIMARY is true. */ + char primary_id[40]; /* Only if flags & VALKEYMODULE_NODE_PRIMARY is true. */ } mdouleClusterNodeInfo; /* We have an array of message types: each bucket is a linked list of @@ -8956,11 +8955,11 @@ size_t VM_GetClusterSize(void) { * or the node ID does not exist from the POV of this local node, VALKEYMODULE_ERR * is returned. * - * The arguments `ip`, `master_id`, `port` and `flags` can be NULL in case we don't - * need to populate back certain info. If an `ip` and `master_id` (only populated - * if the instance is a slave) are specified, they point to buffers holding + * The arguments `ip`, `primary_id`, `port` and `flags` can be NULL in case we don't + * need to populate back certain info. If an `ip` and `primary_id` (only populated + * if the instance is a replica) are specified, they point to buffers holding * at least VALKEYMODULE_NODE_ID_LEN bytes. The strings written back as `ip` - * and `master_id` are not null terminated. + * and `primary_id` are not null terminated. * * The list of flags reported is the following: * @@ -8969,9 +8968,9 @@ size_t VM_GetClusterSize(void) { * * VALKEYMODULE_NODE_REPLICA: The node is a replica * * VALKEYMODULE_NODE_PFAIL: We see the node as failing * * VALKEYMODULE_NODE_FAIL: The cluster agrees the node is failing - * * VALKEYMODULE_NODE_NOFAILOVER: The slave is configured to never failover + * * VALKEYMODULE_NODE_NOFAILOVER: The replica is configured to never failover */ -int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char *master_id, int *port, int *flags) { +int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char *primary_id, int *port, int *flags) { UNUSED(ctx); clusterNode *node = clusterLookupNode(id, strlen(id)); @@ -8981,14 +8980,14 @@ int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char * if (ip) valkey_strlcpy(ip, clusterNodeIp(node), NET_IP_STR_LEN); - if (master_id) { + if (primary_id) { /* If the information is not available, the function will set the * field to zero bytes, so that when the field can't be populated the * function kinda remains predictable. 
*/ - if (clusterNodeIsSlave(node) && clusterNodeGetMaster(node)) - memcpy(master_id, clusterNodeGetName(clusterNodeGetMaster(node)), VALKEYMODULE_NODE_ID_LEN); + if (clusterNodeIsReplica(node) && clusterNodeGetPrimary(node)) + memcpy(primary_id, clusterNodeGetName(clusterNodeGetPrimary(node)), VALKEYMODULE_NODE_ID_LEN); else - memset(master_id, 0, VALKEYMODULE_NODE_ID_LEN); + memset(primary_id, 0, VALKEYMODULE_NODE_ID_LEN); } if (port) *port = getNodeDefaultClientPort(node); @@ -8997,8 +8996,8 @@ int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char * if (flags) { *flags = 0; if (clusterNodeIsMyself(node)) *flags |= VALKEYMODULE_NODE_MYSELF; - if (clusterNodeIsMaster(node)) *flags |= VALKEYMODULE_NODE_PRIMARY; - if (clusterNodeIsSlave(node)) *flags |= VALKEYMODULE_NODE_REPLICA; + if (clusterNodeIsPrimary(node)) *flags |= VALKEYMODULE_NODE_PRIMARY; + if (clusterNodeIsReplica(node)) *flags |= VALKEYMODULE_NODE_REPLICA; if (clusterNodeTimedOut(node)) *flags |= VALKEYMODULE_NODE_PFAIL; if (clusterNodeIsFailing(node)) *flags |= VALKEYMODULE_NODE_FAIL; if (clusterNodeIsNoFailover(node)) *flags |= VALKEYMODULE_NODE_NOFAILOVER; @@ -9017,7 +9016,7 @@ int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char * * * With the following effects: * - * * NO_FAILOVER: prevent Cluster slaves from failing over a dead master. + * * NO_FAILOVER: prevent Cluster replicas from failing over a dead primary. * Also disables the replica migration feature. * * * NO_REDIRECTION: Every node will accept any key, without trying to perform @@ -9466,7 +9465,7 @@ void revokeClientAuthentication(client *c) { moduleNotifyUserChanged(c); c->user = DefaultUser; - c->authenticated = 0; + c->flags &= ~CLIENT_AUTHENTICATED; /* We will write replies to this client later, so we can't close it * directly even if async. */ if (c == server.current_client) { @@ -9788,7 +9787,7 @@ static int authenticateClientWithUser(ValkeyModuleCtx *ctx, moduleNotifyUserChanged(ctx->client); ctx->client->user = user; - ctx->client->authenticated = 1; + ctx->client->flags |= CLIENT_AUTHENTICATED; if (clientHasModuleAuthInProgress(ctx->client)) { ctx->client->flags |= CLIENT_MODULE_AUTH_HAS_RESULT; @@ -10595,7 +10594,7 @@ int moduleUnregisterFilters(ValkeyModule *module) { * 1. Invocation by a client. * 2. Invocation through `ValkeyModule_Call()` by any module. * 3. Invocation through Lua `redis.call()`. - * 4. Replication of a command from a master. + * 4. Replication of a command from a primary. * * The filter executes in a special filter context, which is different and more * limited than a ValkeyModuleCtx. Because the filter affects any command, it @@ -11244,10 +11243,10 @@ static uint64_t moduleEventVersions[] = { * * * ValkeyModuleEvent_ReplicationRoleChanged: * - * This event is called when the instance switches from master + * This event is called when the instance switches from primary * to replica or the other way around, however the event is * also called when the replica remains a replica but starts to - * replicate with a different master. + * replicate with a different primary. 
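A usage sketch for the renamed primary_id output of the cluster-node-info call above; per the documentation in this hunk, the buffer must hold at least VALKEYMODULE_NODE_ID_LEN bytes and is not NUL terminated:

    /* Sketch: given a node id, report whether it is a replica and capture its primary's ID. */
    int nodeIsReplica(ValkeyModuleCtx *ctx, const char *node_id, char *primary_id /* VALKEYMODULE_NODE_ID_LEN bytes */) {
        int port, flags;
        if (ValkeyModule_GetClusterNodeInfo(ctx, node_id, NULL, primary_id, &port, &flags) != VALKEYMODULE_OK)
            return 0; /* unknown node, or cluster mode disabled */
        /* primary_id is only populated for replicas and is not NUL terminated. */
        return (flags & VALKEYMODULE_NODE_REPLICA) != 0;
    }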
* * The following sub events are available: * @@ -11257,9 +11256,9 @@ static uint64_t moduleEventVersions[] = { * The 'data' field can be casted by the callback to a * `ValkeyModuleReplicationInfo` structure with the following fields: * - * int master; // true if master, false if replica - * char *masterhost; // master instance hostname for NOW_REPLICA - * int masterport; // master instance port for NOW_REPLICA + * int primary; // true if primary, false if replica + * char *primary_host; // primary instance hostname for NOW_REPLICA + * int primary_port; // primary instance port for NOW_REPLICA * char *replid1; // Main replication ID * char *replid2; // Secondary replication ID * uint64_t repl1_offset; // Main replication offset @@ -11316,7 +11315,7 @@ static uint64_t moduleEventVersions[] = { * * Called on loading operations: at startup when the server is * started, but also after a first synchronization when the - * replica is loading the RDB file from the master. + * replica is loading the RDB file from the primary. * The following sub events are available: * * * `VALKEYMODULE_SUBEVENT_LOADING_RDB_START` @@ -11345,7 +11344,7 @@ static uint64_t moduleEventVersions[] = { * * ValkeyModuleEvent_ReplicaChange * * This event is called when the instance (that can be both a - * master or a replica) get a new online replica, or lose a + * primary or a replica) get a new online replica, or lose a * replica since it gets disconnected. * The following sub events are available: * @@ -11373,9 +11372,9 @@ static uint64_t moduleEventVersions[] = { * * ValkeyModuleEvent_PrimaryLinkChange * * This is called for replicas in order to notify when the - * replication link becomes functional (up) with our master, + * replication link becomes functional (up) with our primary, * or when it goes down. Note that the link is not considered - * up when we just connected to the master, but only if the + * up when we just connected to the primary, but only if the * replication is happening correctly. * The following sub events are available: * @@ -11443,7 +11442,7 @@ static uint64_t moduleEventVersions[] = { * * * ValkeyModuleEvent_ReplAsyncLoad * - * Called when repl-diskless-load config is set to swapdb and a replication with a master of same + * Called when repl-diskless-load config is set to swapdb and a replication with a primary of same * data set history (matching replication ID) occurs. * In which case the server serves current data set while loading new database in memory from socket. 
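The ReplicationRoleChanged documentation above pairs with the generic server-event callback shape used by hellohook.c later in this diff. A minimal sketch, casting the data pointer to the structure whose fields are listed in this hunk:

    /* Sketch: react to primary/replica role changes from a module. */
    void roleChangedCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) {
        VALKEYMODULE_NOT_USED(e);
        VALKEYMODULE_NOT_USED(sub);
        ValkeyModuleReplicationInfo *ri = data;
        if (ri->primary)
            ValkeyModule_Log(ctx, "notice", "role change: now a primary");
        else
            ValkeyModule_Log(ctx, "notice", "role change: now replicating from %s:%d",
                             ri->primary_host, ri->primary_port);
    }

    /* registered from ValkeyModule_OnLoad():
     *   ValkeyModule_SubscribeToServerEvent(ctx, ValkeyModuleEvent_ReplicationRoleChanged, roleChangedCallback);
     */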
* Modules must have declared they support this mechanism in order to activate it, through @@ -11781,7 +11780,6 @@ int dictCStringKeyCompare(dict *d, const void *key1, const void *key2) { dictType moduleAPIDictType = { dictCStringKeyHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictCStringKeyCompare, /* key compare */ NULL, /* key destructor */ NULL, /* val destructor */ @@ -11812,7 +11810,6 @@ void moduleInitModulesSystemLast(void) { dictType sdsKeyValueHashDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ - NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictSdsDestructor, /* val destructor */ @@ -11823,7 +11820,7 @@ void moduleInitModulesSystem(void) { moduleUnblockedClients = listCreate(); server.loadmodule_queue = listCreate(); server.module_configs_queue = dictCreate(&sdsKeyValueHashDictType); - server.module_gil_acquring = 0; + server.module_gil_acquiring = 0; modules = dictCreate(&modulesDictType); moduleAuthCallbacks = listCreate(); @@ -11927,7 +11924,7 @@ void moduleRemoveCateogires(ValkeyModule *module) { * The function aborts the server on errors, since to start with missing * modules is not considered sane: clients may rely on the existence of * given commands, loading AOF also may need some modules to exist, and - * if this instance is a slave, it must understand commands from master. */ + * if this instance is a replica, it must understand commands from primary. */ void moduleLoadFromQueue(void) { listIter li; listNode *ln; @@ -12912,13 +12909,13 @@ int VM_RdbLoad(ValkeyModuleCtx *ctx, ValkeyModuleRdbStream *stream, int flags) { } /* Not allowed on replicas. */ - if (server.masterhost != NULL) { + if (server.primary_host != NULL) { errno = ENOTSUP; return VALKEYMODULE_ERR; } /* Drop replicas if exist. */ - disconnectSlaves(); + disconnectReplicas(); freeReplicationBacklog(); if (server.aof_state != AOF_OFF) stopAppendOnly(); diff --git a/src/modules/helloacl.c b/src/modules/helloacl.c index ed7298e696..6659b98f8c 100644 --- a/src/modules/helloacl.c +++ b/src/modules/helloacl.c @@ -39,7 +39,7 @@ static ValkeyModuleUser *global; static uint64_t global_auth_client_id = 0; -/* HELLOACL.REVOKE +/* HELLOACL.REVOKE * Synchronously revoke access from a user. */ int RevokeCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); @@ -49,11 +49,11 @@ int RevokeCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, ValkeyModule_DeauthenticateAndCloseClient(ctx, global_auth_client_id); return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } else { - return ValkeyModule_ReplyWithError(ctx, "Global user currently not used"); + return ValkeyModule_ReplyWithError(ctx, "Global user currently not used"); } } -/* HELLOACL.RESET +/* HELLOACL.RESET * Synchronously delete and re-create a module user. 
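For the VM_RdbLoad hunk above: the call is refused on replicas (primary_host set) with errno set to ENOTSUP, and it drops connected replicas before loading. A module-side usage sketch; the stream helpers named here are assumed from the existing RDB-stream API and are not part of this diff:

    /* Sketch: load an RDB file into the instance from a module (primaries only). */
    ValkeyModuleRdbStream *s = ValkeyModule_RdbStreamCreateFromFile("dump.rdb");
    if (ValkeyModule_RdbLoad(ctx, s, 0) != VALKEYMODULE_OK) {
        /* errno is ENOTSUP when called on a replica */
        ValkeyModule_Log(ctx, "warning", "RDB load failed");
    }
    ValkeyModule_RdbStreamFree(s);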
*/ int ResetCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); @@ -68,7 +68,7 @@ int ResetCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } -/* Callback handler for user changes, use this to notify a module of +/* Callback handler for user changes, use this to notify a module of * changes to users authenticated by the module */ void HelloACL_UserChanged(uint64_t client_id, void *privdata) { VALKEYMODULE_NOT_USED(privdata); @@ -76,14 +76,14 @@ void HelloACL_UserChanged(uint64_t client_id, void *privdata) { global_auth_client_id = 0; } -/* HELLOACL.AUTHGLOBAL +/* HELLOACL.AUTHGLOBAL * Synchronously assigns a module user to the current context. */ int AuthGlobalCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); if (global_auth_client_id) { - return ValkeyModule_ReplyWithError(ctx, "Global user currently used"); + return ValkeyModule_ReplyWithError(ctx, "Global user currently used"); } ValkeyModule_AuthenticateClientWithUser(ctx, global, HelloACL_UserChanged, NULL, &global_auth_client_id); @@ -102,9 +102,8 @@ int HelloACL_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { ValkeyModuleString *user_string = ValkeyModule_GetBlockedClientPrivateData(ctx); const char *name = ValkeyModule_StringPtrLen(user_string, &length); - if (ValkeyModule_AuthenticateClientWithACLUser(ctx, name, length, NULL, NULL, NULL) == - VALKEYMODULE_ERR) { - return ValkeyModule_ReplyWithError(ctx, "Invalid Username or password"); + if (ValkeyModule_AuthenticateClientWithACLUser(ctx, name, length, NULL, NULL, NULL) == VALKEYMODULE_ERR) { + return ValkeyModule_ReplyWithError(ctx, "Invalid Username or password"); } return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } @@ -129,20 +128,21 @@ void *HelloACL_ThreadMain(void *args) { ValkeyModuleString *user = targs[1]; ValkeyModule_Free(targs); - ValkeyModule_UnblockClient(bc,user); + ValkeyModule_UnblockClient(bc, user); return NULL; } -/* HELLOACL.AUTHASYNC +/* HELLOACL.AUTHASYNC * Asynchronously assigns an ACL user to the current context. 
*/ int AuthAsyncCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); pthread_t tid; - ValkeyModuleBlockedClient *bc = ValkeyModule_BlockClient(ctx, HelloACL_Reply, HelloACL_Timeout, HelloACL_FreeData, TIMEOUT_TIME); - + ValkeyModuleBlockedClient *bc = + ValkeyModule_BlockClient(ctx, HelloACL_Reply, HelloACL_Timeout, HelloACL_FreeData, TIMEOUT_TIME); - void **targs = ValkeyModule_Alloc(sizeof(void*)*2); + + void **targs = ValkeyModule_Alloc(sizeof(void *) * 2); targs[0] = bc; targs[1] = ValkeyModule_CreateStringFromString(NULL, argv[1]); @@ -160,23 +160,21 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"helloacl",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "helloacl", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"helloacl.reset", - ResetCommand_ValkeyCommand,"",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "helloacl.reset", ResetCommand_ValkeyCommand, "", 0, 0, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"helloacl.revoke", - RevokeCommand_ValkeyCommand,"",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "helloacl.revoke", RevokeCommand_ValkeyCommand, "", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"helloacl.authglobal", - AuthGlobalCommand_ValkeyCommand,"no-auth",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "helloacl.authglobal", AuthGlobalCommand_ValkeyCommand, "no-auth", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"helloacl.authasync", - AuthAsyncCommand_ValkeyCommand,"no-auth",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "helloacl.authasync", AuthAsyncCommand_ValkeyCommand, "no-auth", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; global = ValkeyModule_CreateModuleUser("global"); diff --git a/src/modules/helloblock.c b/src/modules/helloblock.c index 40f01f191c..65e9bb71a2 100644 --- a/src/modules/helloblock.c +++ b/src/modules/helloblock.c @@ -42,14 +42,14 @@ int HelloBlock_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); int *myint = ValkeyModule_GetBlockedClientPrivateData(ctx); - return ValkeyModule_ReplyWithLongLong(ctx,*myint); + return ValkeyModule_ReplyWithLongLong(ctx, *myint); } /* Timeout callback for blocking command HELLO.BLOCK */ int HelloBlock_Timeout(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - return ValkeyModule_ReplyWithSimpleString(ctx,"Request timedout"); + return ValkeyModule_ReplyWithSimpleString(ctx, "Request timedout"); } /* Private data freeing callback for HELLO.BLOCK command. */ @@ -69,7 +69,7 @@ void *HelloBlock_ThreadMain(void *arg) { sleep(delay); int *r = ValkeyModule_Alloc(sizeof(int)); *r = rand(); - ValkeyModule_UnblockClient(bc,r); + ValkeyModule_UnblockClient(bc, r); return NULL; } @@ -82,8 +82,7 @@ void *HelloBlock_ThreadMain(void *arg) { * amount of seconds with a while loop calling sleep(1), so that once we * detect the client disconnection, we can terminate the thread ASAP. 
*/ void HelloBlock_Disconnected(ValkeyModuleCtx *ctx, ValkeyModuleBlockedClient *bc) { - ValkeyModule_Log(ctx,"warning","Blocked client %p disconnected!", - (void*)bc); + ValkeyModule_Log(ctx, "warning", "Blocked client %p disconnected!", (void *)bc); /* Here you should cleanup your state / threads, and if possible * call ValkeyModule_UnblockClient(), or notify the thread that will @@ -98,32 +97,33 @@ int HelloBlock_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in long long delay; long long timeout; - if (ValkeyModule_StringToLongLong(argv[1],&delay) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if (ValkeyModule_StringToLongLong(argv[1], &delay) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } - if (ValkeyModule_StringToLongLong(argv[2],&timeout) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if (ValkeyModule_StringToLongLong(argv[2], &timeout) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } pthread_t tid; - ValkeyModuleBlockedClient *bc = ValkeyModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout); + ValkeyModuleBlockedClient *bc = + ValkeyModule_BlockClient(ctx, HelloBlock_Reply, HelloBlock_Timeout, HelloBlock_FreeData, timeout); /* Here we set a disconnection handler, however since this module will * block in sleep() in a thread, there is not much we can do in the * callback, so this is just to show you the API. */ - ValkeyModule_SetDisconnectCallback(bc,HelloBlock_Disconnected); + ValkeyModule_SetDisconnectCallback(bc, HelloBlock_Disconnected); /* Now that we setup a blocking client, we need to pass the control * to the thread. However we need to pass arguments to the thread: * the delay and a reference to the blocked client handle. 
*/ - void **targ = ValkeyModule_Alloc(sizeof(void*)*2); + void **targ = ValkeyModule_Alloc(sizeof(void *) * 2); targ[0] = bc; - targ[1] = (void*)(unsigned long) delay; + targ[1] = (void *)(unsigned long)delay; - if (pthread_create(&tid,NULL,HelloBlock_ThreadMain,targ) != 0) { + if (pthread_create(&tid, NULL, HelloBlock_ThreadMain, targ) != 0) { ValkeyModule_AbortBlock(bc); - return ValkeyModule_ReplyWithError(ctx,"-ERR Can't start thread"); + return ValkeyModule_ReplyWithError(ctx, "-ERR Can't start thread"); } return VALKEYMODULE_OK; } @@ -141,35 +141,31 @@ void *HelloKeys_ThreadMain(void *arg) { long long cursor = 0; size_t replylen = 0; - ValkeyModule_ReplyWithArray(ctx,VALKEYMODULE_POSTPONED_LEN); + ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN); do { ValkeyModule_ThreadSafeContextLock(ctx); - ValkeyModuleCallReply *reply = ValkeyModule_Call(ctx, - "SCAN","l",(long long)cursor); + ValkeyModuleCallReply *reply = ValkeyModule_Call(ctx, "SCAN", "l", (long long)cursor); ValkeyModule_ThreadSafeContextUnlock(ctx); - ValkeyModuleCallReply *cr_cursor = - ValkeyModule_CallReplyArrayElement(reply,0); - ValkeyModuleCallReply *cr_keys = - ValkeyModule_CallReplyArrayElement(reply,1); + ValkeyModuleCallReply *cr_cursor = ValkeyModule_CallReplyArrayElement(reply, 0); + ValkeyModuleCallReply *cr_keys = ValkeyModule_CallReplyArrayElement(reply, 1); ValkeyModuleString *s = ValkeyModule_CreateStringFromCallReply(cr_cursor); - ValkeyModule_StringToLongLong(s,&cursor); - ValkeyModule_FreeString(ctx,s); + ValkeyModule_StringToLongLong(s, &cursor); + ValkeyModule_FreeString(ctx, s); size_t items = ValkeyModule_CallReplyLength(cr_keys); for (size_t j = 0; j < items; j++) { - ValkeyModuleCallReply *ele = - ValkeyModule_CallReplyArrayElement(cr_keys,j); - ValkeyModule_ReplyWithCallReply(ctx,ele); + ValkeyModuleCallReply *ele = ValkeyModule_CallReplyArrayElement(cr_keys, j); + ValkeyModule_ReplyWithCallReply(ctx, ele); replylen++; } ValkeyModule_FreeCallReply(reply); } while (cursor != 0); - ValkeyModule_ReplySetArrayLength(ctx,replylen); + ValkeyModule_ReplySetArrayLength(ctx, replylen); ValkeyModule_FreeThreadSafeContext(ctx); - ValkeyModule_UnblockClient(bc,NULL); + ValkeyModule_UnblockClient(bc, NULL); return NULL; } @@ -186,14 +182,14 @@ int HelloKeys_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int /* Note that when blocking the client we do not set any callback: no * timeout is possible since we passed '0', nor we need a reply callback * because we'll use the thread safe context to accumulate a reply. */ - ValkeyModuleBlockedClient *bc = ValkeyModule_BlockClient(ctx,NULL,NULL,NULL,0); + ValkeyModuleBlockedClient *bc = ValkeyModule_BlockClient(ctx, NULL, NULL, NULL, 0); /* Now that we setup a blocking client, we need to pass the control * to the thread. However we need to pass arguments to the thread: * the reference to the blocked client handle. 
*/ - if (pthread_create(&tid,NULL,HelloKeys_ThreadMain,bc) != 0) { + if (pthread_create(&tid, NULL, HelloKeys_ThreadMain, bc) != 0) { ValkeyModule_AbortBlock(bc); - return ValkeyModule_ReplyWithError(ctx,"-ERR Can't start thread"); + return ValkeyModule_ReplyWithError(ctx, "-ERR Can't start thread"); } return VALKEYMODULE_OK; } @@ -204,14 +200,11 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"helloblock",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "helloblock", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.block", - HelloBlock_ValkeyCommand,"",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.block", HelloBlock_ValkeyCommand, "", 0, 0, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.keys", - HelloKeys_ValkeyCommand,"",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.keys", HelloKeys_ValkeyCommand, "", 0, 0, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; return VALKEYMODULE_OK; diff --git a/src/modules/hellocluster.c b/src/modules/hellocluster.c index 996b506535..cfc0d4f0f4 100644 --- a/src/modules/hellocluster.c +++ b/src/modules/hellocluster.c @@ -44,7 +44,7 @@ int PingallCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - ValkeyModule_SendClusterMessage(ctx,NULL,MSGTYPE_PING,"Hey",3); + ValkeyModule_SendClusterMessage(ctx, NULL, MSGTYPE_PING, "Hey", 3); return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } @@ -54,36 +54,44 @@ int ListCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, i VALKEYMODULE_NOT_USED(argc); size_t numnodes; - char **ids = ValkeyModule_GetClusterNodesList(ctx,&numnodes); + char **ids = ValkeyModule_GetClusterNodesList(ctx, &numnodes); if (ids == NULL) { - return ValkeyModule_ReplyWithError(ctx,"Cluster not enabled"); + return ValkeyModule_ReplyWithError(ctx, "Cluster not enabled"); } - ValkeyModule_ReplyWithArray(ctx,numnodes); + ValkeyModule_ReplyWithArray(ctx, numnodes); for (size_t j = 0; j < numnodes; j++) { int port; - ValkeyModule_GetClusterNodeInfo(ctx,ids[j],NULL,NULL,&port,NULL); - ValkeyModule_ReplyWithArray(ctx,2); - ValkeyModule_ReplyWithStringBuffer(ctx,ids[j],VALKEYMODULE_NODE_ID_LEN); - ValkeyModule_ReplyWithLongLong(ctx,port); + ValkeyModule_GetClusterNodeInfo(ctx, ids[j], NULL, NULL, &port, NULL); + ValkeyModule_ReplyWithArray(ctx, 2); + ValkeyModule_ReplyWithStringBuffer(ctx, ids[j], VALKEYMODULE_NODE_ID_LEN); + ValkeyModule_ReplyWithLongLong(ctx, port); } ValkeyModule_FreeClusterNodesList(ids); return VALKEYMODULE_OK; } /* Callback for message MSGTYPE_PING */ -void PingReceiver(ValkeyModuleCtx *ctx, const char *sender_id, uint8_t type, const unsigned char *payload, uint32_t len) { - ValkeyModule_Log(ctx,"notice","PING (type %d) RECEIVED from %.*s: '%.*s'", - type,VALKEYMODULE_NODE_ID_LEN,sender_id,(int)len, payload); - ValkeyModule_SendClusterMessage(ctx,NULL,MSGTYPE_PONG,"Ohi!",4); +void PingReceiver(ValkeyModuleCtx *ctx, + const char *sender_id, + uint8_t type, + const unsigned char *payload, + uint32_t len) { + ValkeyModule_Log(ctx, "notice", "PING (type %d) RECEIVED from %.*s: '%.*s'", type, VALKEYMODULE_NODE_ID_LEN, + sender_id, (int)len, payload); + ValkeyModule_SendClusterMessage(ctx, NULL, 
MSGTYPE_PONG, "Ohi!", 4); ValkeyModuleCallReply *reply = ValkeyModule_Call(ctx, "INCR", "c", "pings_received"); ValkeyModule_FreeCallReply(reply); } /* Callback for message MSGTYPE_PONG. */ -void PongReceiver(ValkeyModuleCtx *ctx, const char *sender_id, uint8_t type, const unsigned char *payload, uint32_t len) { - ValkeyModule_Log(ctx,"notice","PONG (type %d) RECEIVED from %.*s: '%.*s'", - type,VALKEYMODULE_NODE_ID_LEN,sender_id,(int)len, payload); +void PongReceiver(ValkeyModuleCtx *ctx, + const char *sender_id, + uint8_t type, + const unsigned char *payload, + uint32_t len) { + ValkeyModule_Log(ctx, "notice", "PONG (type %d) RECEIVED from %.*s: '%.*s'", type, VALKEYMODULE_NODE_ID_LEN, + sender_id, (int)len, payload); } /* This function must be present on each module. It is used in order to @@ -92,15 +100,14 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellocluster",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellocluster", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellocluster.pingall", - PingallCommand_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellocluster.pingall", PingallCommand_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellocluster.list", - ListCommand_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellocluster.list", ListCommand_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; /* Disable Cluster sharding and redirections. This way every node @@ -109,10 +116,10 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg * variable. Normally you do that in order for the distributed system * you create as a module to have total freedom in the keyspace * manipulation. */ - ValkeyModule_SetClusterFlags(ctx,VALKEYMODULE_CLUSTER_FLAG_NO_REDIRECTION); + ValkeyModule_SetClusterFlags(ctx, VALKEYMODULE_CLUSTER_FLAG_NO_REDIRECTION); /* Register our handlers for different message types. */ - ValkeyModule_RegisterClusterMessageReceiver(ctx,MSGTYPE_PING,PingReceiver); - ValkeyModule_RegisterClusterMessageReceiver(ctx,MSGTYPE_PONG,PongReceiver); + ValkeyModule_RegisterClusterMessageReceiver(ctx, MSGTYPE_PING, PingReceiver); + ValkeyModule_RegisterClusterMessageReceiver(ctx, MSGTYPE_PONG, PongReceiver); return VALKEYMODULE_OK; } diff --git a/src/modules/hellodict.c b/src/modules/hellodict.c index e699e38f1c..38081919f3 100644 --- a/src/modules/hellodict.c +++ b/src/modules/hellodict.c @@ -46,10 +46,10 @@ static ValkeyModuleDict *Keyspace; * Set the specified key to the specified value. */ int cmd_SET(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 3) return ValkeyModule_WrongArity(ctx); - ValkeyModule_DictSet(Keyspace,argv[1],argv[2]); + ValkeyModule_DictSet(Keyspace, argv[1], argv[2]); /* We need to keep a reference to the value stored at the key, otherwise * it would be freed when this callback returns. */ - ValkeyModule_RetainString(NULL,argv[2]); + ValkeyModule_RetainString(NULL, argv[2]); return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } @@ -59,7 +59,7 @@ int cmd_SET(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { * is not defined. 
*/ int cmd_GET(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); - ValkeyModuleString *val = ValkeyModule_DictGet(Keyspace,argv[1],NULL); + ValkeyModuleString *val = ValkeyModule_DictGet(Keyspace, argv[1], NULL); if (val == NULL) { return ValkeyModule_ReplyWithNull(ctx); } else { @@ -76,27 +76,25 @@ int cmd_KEYRANGE(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { /* Parse the count argument. */ long long count; - if (ValkeyModule_StringToLongLong(argv[3],&count) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if (ValkeyModule_StringToLongLong(argv[3], &count) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } /* Seek the iterator. */ - ValkeyModuleDictIter *iter = ValkeyModule_DictIteratorStart( - Keyspace, ">=", argv[1]); + ValkeyModuleDictIter *iter = ValkeyModule_DictIteratorStart(Keyspace, ">=", argv[1]); /* Reply with the matching items. */ char *key; size_t keylen; long long replylen = 0; /* Keep track of the emitted array len. */ - ValkeyModule_ReplyWithArray(ctx,VALKEYMODULE_POSTPONED_LEN); - while((key = ValkeyModule_DictNextC(iter,&keylen,NULL)) != NULL) { + ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN); + while ((key = ValkeyModule_DictNextC(iter, &keylen, NULL)) != NULL) { if (replylen >= count) break; - if (ValkeyModule_DictCompare(iter,"<=",argv[2]) == VALKEYMODULE_ERR) - break; - ValkeyModule_ReplyWithStringBuffer(ctx,key,keylen); + if (ValkeyModule_DictCompare(iter, "<=", argv[2]) == VALKEYMODULE_ERR) break; + ValkeyModule_ReplyWithStringBuffer(ctx, key, keylen); replylen++; } - ValkeyModule_ReplySetArrayLength(ctx,replylen); + ValkeyModule_ReplySetArrayLength(ctx, replylen); /* Cleanup. */ ValkeyModule_DictIteratorStop(iter); @@ -109,19 +107,15 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellodict",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellodict", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellodict.set", - cmd_SET,"write deny-oom",1,1,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellodict.set", cmd_SET, "write deny-oom", 1, 1, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellodict.get", - cmd_GET,"readonly",1,1,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellodict.get", cmd_GET, "readonly", 1, 1, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellodict.keyrange", - cmd_KEYRANGE,"readonly",1,1,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellodict.keyrange", cmd_KEYRANGE, "readonly", 1, 1, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; /* Create our global dictionary. Here we'll set our keys and values. */ diff --git a/src/modules/hellohook.c b/src/modules/hellohook.c index d6eead5b80..35a1ed0a1a 100644 --- a/src/modules/hellohook.c +++ b/src/modules/hellohook.c @@ -37,20 +37,17 @@ #include /* Client state change callback. 
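The HELLODICT.KEYRANGE hunk above pairs a ">="-seeking dict iterator with a postponed-length array reply, since the number of matches is unknown until the scan ends. Condensed into a stand-alone helper as a sketch (the helper name is invented; keyspace stands for the module-global dict the example uses):

static int ReplyKeyRange(ValkeyModuleCtx *ctx,
                         ValkeyModuleDict *keyspace,
                         ValkeyModuleString *first,
                         ValkeyModuleString *last,
                         long long count) {
    ValkeyModuleDictIter *iter = ValkeyModule_DictIteratorStart(keyspace, ">=", first);
    char *key;
    size_t keylen;
    long long replylen = 0;

    /* Length unknown up front: postpone it, then set it after the loop. */
    ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN);
    while ((key = ValkeyModule_DictNextC(iter, &keylen, NULL)) != NULL) {
        if (replylen >= count) break;
        /* Stop once the iterator moves past the end of the requested range. */
        if (ValkeyModule_DictCompare(iter, "<=", last) == VALKEYMODULE_ERR) break;
        ValkeyModule_ReplyWithStringBuffer(ctx, key, keylen);
        replylen++;
    }
    ValkeyModule_ReplySetArrayLength(ctx, replylen);
    ValkeyModule_DictIteratorStop(iter);
    return VALKEYMODULE_OK;
}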
*/ -void clientChangeCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) -{ +void clientChangeCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) { VALKEYMODULE_NOT_USED(ctx); VALKEYMODULE_NOT_USED(e); ValkeyModuleClientInfo *ci = data; printf("Client %s event for client #%llu %s:%d\n", - (sub == VALKEYMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED) ? - "connection" : "disconnection", - (unsigned long long)ci->id,ci->addr,ci->port); + (sub == VALKEYMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED) ? "connection" : "disconnection", + (unsigned long long)ci->id, ci->addr, ci->port); } -void flushdbCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) -{ +void flushdbCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) { VALKEYMODULE_NOT_USED(ctx); VALKEYMODULE_NOT_USED(e); @@ -58,17 +55,16 @@ void flushdbCallback(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, vo if (sub == VALKEYMODULE_SUBEVENT_FLUSHDB_START) { if (fi->dbnum != -1) { ValkeyModuleCallReply *reply; - reply = ValkeyModule_Call(ctx,"DBSIZE",""); + reply = ValkeyModule_Call(ctx, "DBSIZE", ""); long long numkeys = ValkeyModule_CallReplyInteger(reply); - printf("FLUSHDB event of database %d started (%lld keys in DB)\n", - fi->dbnum, numkeys); + printf("FLUSHDB event of database %d started (%lld keys in DB)\n", fi->dbnum, numkeys); ValkeyModule_FreeCallReply(reply); } else { printf("FLUSHALL event started\n"); } } else { if (fi->dbnum != -1) { - printf("FLUSHDB event of database %d ended\n",fi->dbnum); + printf("FLUSHDB event of database %d ended\n", fi->dbnum); } else { printf("FLUSHALL event ended\n"); } @@ -81,12 +77,9 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellohook",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellohook", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - ValkeyModule_SubscribeToServerEvent(ctx, - ValkeyModuleEvent_ClientChange, clientChangeCallback); - ValkeyModule_SubscribeToServerEvent(ctx, - ValkeyModuleEvent_FlushDB, flushdbCallback); + ValkeyModule_SubscribeToServerEvent(ctx, ValkeyModuleEvent_ClientChange, clientChangeCallback); + ValkeyModule_SubscribeToServerEvent(ctx, ValkeyModuleEvent_FlushDB, flushdbCallback); return VALKEYMODULE_OK; } diff --git a/src/modules/hellotimer.c b/src/modules/hellotimer.c index b0ed3d0b08..40ba323e58 100644 --- a/src/modules/hellotimer.c +++ b/src/modules/hellotimer.c @@ -51,8 +51,8 @@ int TimerCommand_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, for (int j = 0; j < 10; j++) { int delay = rand() % 5000; char *buf = ValkeyModule_Alloc(256); - snprintf(buf,256,"After %d", delay); - ValkeyModuleTimerID tid = ValkeyModule_CreateTimer(ctx,delay,timerHandler,buf); + snprintf(buf, 256, "After %d", delay); + ValkeyModuleTimerID tid = ValkeyModule_CreateTimer(ctx, delay, timerHandler, buf); VALKEYMODULE_NOT_USED(tid); } return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); @@ -64,11 +64,10 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellotimer",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellotimer", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if 
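The hellohook.c hunks above subscribe one callback per server-event family and switch on the sub-event inside it. A minimal sketch of the client connect/disconnect hook, using only the fields the patch itself touches; the module name and log text are illustrative.

#include "valkeymodule.h"
#include <stdio.h>

static void OnClientChange(ValkeyModuleCtx *ctx, ValkeyModuleEvent e, uint64_t sub, void *data) {
    VALKEYMODULE_NOT_USED(ctx);
    VALKEYMODULE_NOT_USED(e);
    ValkeyModuleClientInfo *ci = data;
    printf("client #%llu %s:%d %s\n", (unsigned long long)ci->id, ci->addr, ci->port,
           (sub == VALKEYMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED) ? "connected" : "disconnected");
}

int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv);
    VALKEYMODULE_NOT_USED(argc);
    if (ValkeyModule_Init(ctx, "hookdemo", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR;
    /* The `sub` argument narrows the event down to connect vs. disconnect. */
    ValkeyModule_SubscribeToServerEvent(ctx, ValkeyModuleEvent_ClientChange, OnClientChange);
    return VALKEYMODULE_OK;
}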
(ValkeyModule_CreateCommand(ctx,"hellotimer.timer", - TimerCommand_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotimer.timer", TimerCommand_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; return VALKEYMODULE_OK; diff --git a/src/modules/hellotype.c b/src/modules/hellotype.c index 531f7465ce..7e2dc60c68 100644 --- a/src/modules/hellotype.c +++ b/src/modules/hellotype.c @@ -71,7 +71,7 @@ struct HelloTypeObject *createHelloTypeObject(void) { void HelloTypeInsert(struct HelloTypeObject *o, int64_t ele) { struct HelloTypeNode *next = o->head, *newnode, *prev = NULL; - while(next && next->value < ele) { + while (next && next->value < ele) { prev = next; next = next->next; } @@ -89,7 +89,7 @@ void HelloTypeInsert(struct HelloTypeObject *o, int64_t ele) { void HelloTypeReleaseObject(struct HelloTypeObject *o) { struct HelloTypeNode *cur, *next; cur = o->head; - while(cur) { + while (cur) { next = cur->next; ValkeyModule_Free(cur); cur = next; @@ -104,34 +104,31 @@ int HelloTypeInsert_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ if (argc != 3) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_EMPTY && - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_EMPTY && ValkeyModule_ModuleTypeGetType(key) != HelloType) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } long long value; - if ((ValkeyModule_StringToLongLong(argv[2],&value) != VALKEYMODULE_OK)) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid value: must be a signed 64 bit integer"); + if ((ValkeyModule_StringToLongLong(argv[2], &value) != VALKEYMODULE_OK)) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid value: must be a signed 64 bit integer"); } /* Create an empty value object if the key is currently empty. */ struct HelloTypeObject *hto; if (type == VALKEYMODULE_KEYTYPE_EMPTY) { hto = createHelloTypeObject(); - ValkeyModule_ModuleTypeSetValue(key,HelloType,hto); + ValkeyModule_ModuleTypeSetValue(key, HelloType, hto); } else { hto = ValkeyModule_ModuleTypeGetValue(key); } /* Insert the new element. */ - HelloTypeInsert(hto,value); - ValkeyModule_SignalKeyAsReady(ctx,argv[1]); + HelloTypeInsert(hto, value); + ValkeyModule_SignalKeyAsReady(ctx, argv[1]); - ValkeyModule_ReplyWithLongLong(ctx,hto->len); + ValkeyModule_ReplyWithLongLong(ctx, hto->len); ValkeyModule_ReplicateVerbatim(ctx); return VALKEYMODULE_OK; } @@ -141,34 +138,28 @@ int HelloTypeRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. 
*/ if (argc != 4) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_EMPTY && - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_EMPTY && ValkeyModule_ModuleTypeGetType(key) != HelloType) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } long long first, count; - if (ValkeyModule_StringToLongLong(argv[2],&first) != VALKEYMODULE_OK || - ValkeyModule_StringToLongLong(argv[3],&count) != VALKEYMODULE_OK || - first < 0 || count < 0) - { - return ValkeyModule_ReplyWithError(ctx, - "ERR invalid first or count parameters"); + if (ValkeyModule_StringToLongLong(argv[2], &first) != VALKEYMODULE_OK || + ValkeyModule_StringToLongLong(argv[3], &count) != VALKEYMODULE_OK || first < 0 || count < 0) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid first or count parameters"); } struct HelloTypeObject *hto = ValkeyModule_ModuleTypeGetValue(key); struct HelloTypeNode *node = hto ? hto->head : NULL; - ValkeyModule_ReplyWithArray(ctx,VALKEYMODULE_POSTPONED_LEN); + ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN); long long arraylen = 0; - while(node && count--) { - ValkeyModule_ReplyWithLongLong(ctx,node->value); + while (node && count--) { + ValkeyModule_ReplyWithLongLong(ctx, node->value); arraylen++; node = node->next; } - ValkeyModule_ReplySetArrayLength(ctx,arraylen); + ValkeyModule_ReplySetArrayLength(ctx, arraylen); return VALKEYMODULE_OK; } @@ -177,17 +168,14 @@ int HelloTypeLen_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ if (argc != 2) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_EMPTY && - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_EMPTY && ValkeyModule_ModuleTypeGetType(key) != HelloType) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } struct HelloTypeObject *hto = ValkeyModule_ModuleTypeGetValue(key); - ValkeyModule_ReplyWithLongLong(ctx,hto ? hto->len : 0); + ValkeyModule_ReplyWithLongLong(ctx, hto ? 
hto->len : 0); return VALKEYMODULE_OK; } @@ -201,11 +189,9 @@ int HelloBlock_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) VALKEYMODULE_NOT_USED(argc); ValkeyModuleString *keyname = ValkeyModule_GetBlockedClientReadyKey(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,keyname,VALKEYMODULE_READ); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, keyname, VALKEYMODULE_READ); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_MODULE || - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { + if (type != VALKEYMODULE_KEYTYPE_MODULE || ValkeyModule_ModuleTypeGetType(key) != HelloType) { ValkeyModule_CloseKey(key); return VALKEYMODULE_ERR; } @@ -213,14 +199,14 @@ int HelloBlock_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) /* In case the key is able to serve our blocked client, let's directly * use our original command implementation to make this example simpler. */ ValkeyModule_CloseKey(key); - return HelloTypeRange_ValkeyCommand(ctx,argv,argc-1); + return HelloTypeRange_ValkeyCommand(ctx, argv, argc - 1); } /* Timeout callback for blocking command HELLOTYPE.BRANGE */ int HelloBlock_Timeout(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - return ValkeyModule_ReplyWithSimpleString(ctx,"Request timedout"); + return ValkeyModule_ReplyWithSimpleString(ctx, "Request timedout"); } /* Private data freeing callback for HELLOTYPE.BRANGE command. */ @@ -235,31 +221,28 @@ void HelloBlock_FreeData(ValkeyModuleCtx *ctx, void *privdata) { int HelloTypeBRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 5) return ValkeyModule_WrongArity(ctx); ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_EMPTY && - ValkeyModule_ModuleTypeGetType(key) != HelloType) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_EMPTY && ValkeyModule_ModuleTypeGetType(key) != HelloType) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } /* Parse the timeout before even trying to serve the client synchronously, * so that we always fail ASAP on syntax errors. */ long long timeout; - if (ValkeyModule_StringToLongLong(argv[4],&timeout) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx, - "ERR invalid timeout parameter"); + if (ValkeyModule_StringToLongLong(argv[4], &timeout) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid timeout parameter"); } /* Can we serve the reply synchronously? */ if (type != VALKEYMODULE_KEYTYPE_EMPTY) { - return HelloTypeRange_ValkeyCommand(ctx,argv,argc-1); + return HelloTypeRange_ValkeyCommand(ctx, argv, argc - 1); } /* Otherwise let's block on the key. 
*/ void *privdata = ValkeyModule_Alloc(100); - ValkeyModule_BlockClientOnKeys(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout,argv+1,1,privdata); + ValkeyModule_BlockClientOnKeys(ctx, HelloBlock_Reply, HelloBlock_Timeout, HelloBlock_FreeData, timeout, argv + 1, 1, + privdata); return VALKEYMODULE_OK; } @@ -272,9 +255,9 @@ void *HelloTypeRdbLoad(ValkeyModuleIO *rdb, int encver) { } uint64_t elements = ValkeyModule_LoadUnsigned(rdb); struct HelloTypeObject *hto = createHelloTypeObject(); - while(elements--) { + while (elements--) { int64_t ele = ValkeyModule_LoadSigned(rdb); - HelloTypeInsert(hto,ele); + HelloTypeInsert(hto, ele); } return hto; } @@ -282,9 +265,9 @@ void *HelloTypeRdbLoad(ValkeyModuleIO *rdb, int encver) { void HelloTypeRdbSave(ValkeyModuleIO *rdb, void *value) { struct HelloTypeObject *hto = value; struct HelloTypeNode *node = hto->head; - ValkeyModule_SaveUnsigned(rdb,hto->len); - while(node) { - ValkeyModule_SaveSigned(rdb,node->value); + ValkeyModule_SaveUnsigned(rdb, hto->len); + while (node) { + ValkeyModule_SaveSigned(rdb, node->value); node = node->next; } } @@ -292,8 +275,8 @@ void HelloTypeRdbSave(ValkeyModuleIO *rdb, void *value) { void HelloTypeAofRewrite(ValkeyModuleIO *aof, ValkeyModuleString *key, void *value) { struct HelloTypeObject *hto = value; struct HelloTypeNode *node = hto->head; - while(node) { - ValkeyModule_EmitAOF(aof,"HELLOTYPE.INSERT","sl",key,node->value); + while (node) { + ValkeyModule_EmitAOF(aof, "HELLOTYPE.INSERT", "sl", key, node->value); node = node->next; } } @@ -303,7 +286,7 @@ void HelloTypeAofRewrite(ValkeyModuleIO *aof, ValkeyModuleString *key, void *val size_t HelloTypeMemUsage(const void *value) { const struct HelloTypeObject *hto = value; struct HelloTypeNode *node = hto->head; - return sizeof(*hto) + sizeof(*node)*hto->len; + return sizeof(*hto) + sizeof(*node) * hto->len; } void HelloTypeFree(void *value) { @@ -313,8 +296,8 @@ void HelloTypeFree(void *value) { void HelloTypeDigest(ValkeyModuleDigest *md, void *value) { struct HelloTypeObject *hto = value; struct HelloTypeNode *node = hto->head; - while(node) { - ValkeyModule_DigestAddLongLong(md,node->value); + while (node) { + ValkeyModule_DigestAddLongLong(md, node->value); node = node->next; } ValkeyModule_DigestEndSequence(md); @@ -326,36 +309,33 @@ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int arg VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - if (ValkeyModule_Init(ctx,"hellotype",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "hellotype", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - ValkeyModuleTypeMethods tm = { - .version = VALKEYMODULE_TYPE_METHOD_VERSION, - .rdb_load = HelloTypeRdbLoad, - .rdb_save = HelloTypeRdbSave, - .aof_rewrite = HelloTypeAofRewrite, - .mem_usage = HelloTypeMemUsage, - .free = HelloTypeFree, - .digest = HelloTypeDigest - }; + ValkeyModuleTypeMethods tm = {.version = VALKEYMODULE_TYPE_METHOD_VERSION, + .rdb_load = HelloTypeRdbLoad, + .rdb_save = HelloTypeRdbSave, + .aof_rewrite = HelloTypeAofRewrite, + .mem_usage = HelloTypeMemUsage, + .free = HelloTypeFree, + .digest = HelloTypeDigest}; - HelloType = ValkeyModule_CreateDataType(ctx,"hellotype",0,&tm); + HelloType = ValkeyModule_CreateDataType(ctx, "hellotype", 0, &tm); if (HelloType == NULL) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotype.insert", - HelloTypeInsert_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if 
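The HELLOTYPE.BRANGE hunks above show the block-on-keys flow: a reply callback for when a watched key becomes ready, a timeout callback, and a destructor for the private data. A trimmed sketch of the same wiring, with invented command and callback names and a simplified reply:

static int Block_Reply(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv);
    VALKEYMODULE_NOT_USED(argc);
    /* Invoked when one of the watched keys is signaled ready. */
    ValkeyModuleString *keyname = ValkeyModule_GetBlockedClientReadyKey(ctx);
    return ValkeyModule_ReplyWithString(ctx, keyname);
}

static int Block_Timeout(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv);
    VALKEYMODULE_NOT_USED(argc);
    return ValkeyModule_ReplyWithSimpleString(ctx, "Request timed out");
}

static void Block_FreeData(ValkeyModuleCtx *ctx, void *privdata) {
    VALKEYMODULE_NOT_USED(ctx);
    ValkeyModule_Free(privdata);
}

static int BlockingRead_Command(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
    if (argc != 3) return ValkeyModule_WrongArity(ctx);
    long long timeout;
    if (ValkeyModule_StringToLongLong(argv[2], &timeout) != VALKEYMODULE_OK)
        return ValkeyModule_ReplyWithError(ctx, "ERR invalid timeout parameter");
    void *privdata = ValkeyModule_Alloc(100);
    /* Block on argv[1] only; a writer must call ValkeyModule_SignalKeyAsReady()
     * on that key (as HELLOTYPE.INSERT does) for the reply callback to run. */
    ValkeyModule_BlockClientOnKeys(ctx, Block_Reply, Block_Timeout, Block_FreeData, timeout, argv + 1, 1, privdata);
    return VALKEYMODULE_OK;
}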
(ValkeyModule_CreateCommand(ctx, "hellotype.insert", HelloTypeInsert_ValkeyCommand, "write deny-oom", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotype.range", - HelloTypeRange_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotype.range", HelloTypeRange_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotype.len", - HelloTypeLen_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotype.len", HelloTypeLen_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hellotype.brange", - HelloTypeBRange_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hellotype.brange", HelloTypeBRange_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; return VALKEYMODULE_OK; diff --git a/src/modules/helloworld.c b/src/modules/helloworld.c index af42ec3a33..f74e4e9b66 100644 --- a/src/modules/helloworld.c +++ b/src/modules/helloworld.c @@ -48,7 +48,7 @@ int HelloSimple_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); - ValkeyModule_ReplyWithLongLong(ctx,ValkeyModule_GetSelectedDb(ctx)); + ValkeyModule_ReplyWithLongLong(ctx, ValkeyModule_GetSelectedDb(ctx)); return VALKEYMODULE_OK; } @@ -58,17 +58,15 @@ int HelloSimple_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, i * * You'll find this command to be roughly as fast as the actual RPUSH * command. */ -int HelloPushNative_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloPushNative_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 3) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); - ValkeyModule_ListPush(key,VALKEYMODULE_LIST_TAIL,argv[2]); + ValkeyModule_ListPush(key, VALKEYMODULE_LIST_TAIL, argv[2]); size_t newlen = ValkeyModule_ValueLength(key); ValkeyModule_CloseKey(key); - ValkeyModule_ReplyWithLongLong(ctx,newlen); + ValkeyModule_ReplyWithLongLong(ctx, newlen); return VALKEYMODULE_OK; } @@ -77,30 +75,28 @@ int HelloPushNative_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg * approach is useful when you need to call commands that are not * available as low level APIs, or when you don't need the maximum speed * possible but instead prefer implementation simplicity. */ -int HelloPushCall_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloPushCall_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 3) return ValkeyModule_WrongArity(ctx); ValkeyModuleCallReply *reply; - reply = ValkeyModule_Call(ctx,"RPUSH","ss",argv[1],argv[2]); + reply = ValkeyModule_Call(ctx, "RPUSH", "ss", argv[1], argv[2]); long long len = ValkeyModule_CallReplyInteger(reply); ValkeyModule_FreeCallReply(reply); - ValkeyModule_ReplyWithLongLong(ctx,len); + ValkeyModule_ReplyWithLongLong(ctx, len); return VALKEYMODULE_OK; } /* HELLO.PUSH.CALL2 * This is exactly as HELLO.PUSH.CALL, but shows how we can reply to the * client using directly a reply object that Call() returned. 
*/ -int HelloPushCall2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloPushCall2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 3) return ValkeyModule_WrongArity(ctx); ValkeyModuleCallReply *reply; - reply = ValkeyModule_Call(ctx,"RPUSH","ss",argv[1],argv[2]); - ValkeyModule_ReplyWithCallReply(ctx,reply); + reply = ValkeyModule_Call(ctx, "RPUSH", "ss", argv[1], argv[2]); + ValkeyModule_ReplyWithCallReply(ctx, reply); ValkeyModule_FreeCallReply(reply); return VALKEYMODULE_OK; } @@ -108,22 +104,21 @@ int HelloPushCall2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv /* HELLO.LIST.SUM.LEN returns the total length of all the items inside * a list, by using the high level Call() API. * This command is an example of the array reply access. */ -int HelloListSumLen_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloListSumLen_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); ValkeyModuleCallReply *reply; - reply = ValkeyModule_Call(ctx,"LRANGE","sll",argv[1],(long long)0,(long long)-1); + reply = ValkeyModule_Call(ctx, "LRANGE", "sll", argv[1], (long long)0, (long long)-1); size_t strlen = 0; size_t items = ValkeyModule_CallReplyLength(reply); size_t j; for (j = 0; j < items; j++) { - ValkeyModuleCallReply *ele = ValkeyModule_CallReplyArrayElement(reply,j); + ValkeyModuleCallReply *ele = ValkeyModule_CallReplyArrayElement(reply, j); strlen += ValkeyModule_CallReplyLength(ele); } ValkeyModule_FreeCallReply(reply); - ValkeyModule_ReplyWithLongLong(ctx,strlen); + ValkeyModule_ReplyWithLongLong(ctx, strlen); return VALKEYMODULE_OK; } @@ -134,43 +129,39 @@ int HelloListSumLen_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg int HelloListSplice_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 4) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *srckey = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); - ValkeyModuleKey *dstkey = ValkeyModule_OpenKey(ctx,argv[2], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *srckey = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); + ValkeyModuleKey *dstkey = ValkeyModule_OpenKey(ctx, argv[2], VALKEYMODULE_READ | VALKEYMODULE_WRITE); /* Src and dst key must be empty or lists. 
*/ if ((ValkeyModule_KeyType(srckey) != VALKEYMODULE_KEYTYPE_LIST && ValkeyModule_KeyType(srckey) != VALKEYMODULE_KEYTYPE_EMPTY) || (ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_LIST && - ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_EMPTY)) - { + ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_EMPTY)) { ValkeyModule_CloseKey(srckey); ValkeyModule_CloseKey(dstkey); - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } long long count; - if ((ValkeyModule_StringToLongLong(argv[3],&count) != VALKEYMODULE_OK) || - (count < 0)) { + if ((ValkeyModule_StringToLongLong(argv[3], &count) != VALKEYMODULE_OK) || (count < 0)) { ValkeyModule_CloseKey(srckey); ValkeyModule_CloseKey(dstkey); - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } - while(count-- > 0) { + while (count-- > 0) { ValkeyModuleString *ele; - ele = ValkeyModule_ListPop(srckey,VALKEYMODULE_LIST_TAIL); + ele = ValkeyModule_ListPop(srckey, VALKEYMODULE_LIST_TAIL); if (ele == NULL) break; - ValkeyModule_ListPush(dstkey,VALKEYMODULE_LIST_HEAD,ele); - ValkeyModule_FreeString(ctx,ele); + ValkeyModule_ListPush(dstkey, VALKEYMODULE_LIST_HEAD, ele); + ValkeyModule_FreeString(ctx, ele); } size_t len = ValkeyModule_ValueLength(srckey); ValkeyModule_CloseKey(srckey); ValkeyModule_CloseKey(dstkey); - ValkeyModule_ReplyWithLongLong(ctx,len); + ValkeyModule_ReplyWithLongLong(ctx, len); return VALKEYMODULE_OK; } @@ -181,37 +172,32 @@ int HelloListSpliceAuto_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString * ValkeyModule_AutoMemory(ctx); - ValkeyModuleKey *srckey = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); - ValkeyModuleKey *dstkey = ValkeyModule_OpenKey(ctx,argv[2], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *srckey = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); + ValkeyModuleKey *dstkey = ValkeyModule_OpenKey(ctx, argv[2], VALKEYMODULE_READ | VALKEYMODULE_WRITE); /* Src and dst key must be empty or lists. 
*/ if ((ValkeyModule_KeyType(srckey) != VALKEYMODULE_KEYTYPE_LIST && ValkeyModule_KeyType(srckey) != VALKEYMODULE_KEYTYPE_EMPTY) || (ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_LIST && - ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_EMPTY)) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + ValkeyModule_KeyType(dstkey) != VALKEYMODULE_KEYTYPE_EMPTY)) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } long long count; - if ((ValkeyModule_StringToLongLong(argv[3],&count) != VALKEYMODULE_OK) || - (count < 0)) - { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if ((ValkeyModule_StringToLongLong(argv[3], &count) != VALKEYMODULE_OK) || (count < 0)) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); } - while(count-- > 0) { + while (count-- > 0) { ValkeyModuleString *ele; - ele = ValkeyModule_ListPop(srckey,VALKEYMODULE_LIST_TAIL); + ele = ValkeyModule_ListPop(srckey, VALKEYMODULE_LIST_TAIL); if (ele == NULL) break; - ValkeyModule_ListPush(dstkey,VALKEYMODULE_LIST_HEAD,ele); + ValkeyModule_ListPush(dstkey, VALKEYMODULE_LIST_HEAD, ele); } size_t len = ValkeyModule_ValueLength(srckey); - ValkeyModule_ReplyWithLongLong(ctx,len); + ValkeyModule_ReplyWithLongLong(ctx, len); return VALKEYMODULE_OK; } @@ -221,15 +207,14 @@ int HelloListSpliceAuto_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString * int HelloRandArray_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); long long count; - if (ValkeyModule_StringToLongLong(argv[1],&count) != VALKEYMODULE_OK || - count < 0) - return ValkeyModule_ReplyWithError(ctx,"ERR invalid count"); + if (ValkeyModule_StringToLongLong(argv[1], &count) != VALKEYMODULE_OK || count < 0) + return ValkeyModule_ReplyWithError(ctx, "ERR invalid count"); /* To reply with an array, we call ValkeyModule_ReplyWithArray() followed * by other "count" calls to other reply functions in order to generate * the elements of the array. */ - ValkeyModule_ReplyWithArray(ctx,count); - while(count--) ValkeyModule_ReplyWithLongLong(ctx,rand()); + ValkeyModule_ReplyWithArray(ctx, count); + while (count--) ValkeyModule_ReplyWithLongLong(ctx, rand()); return VALKEYMODULE_OK; } @@ -237,8 +222,7 @@ int HelloRandArray_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv * in the ValkeyModule_Call() call, the two INCRs get replicated. * Also note how the ECHO is replicated in an unexpected position (check * comments the function implementation). */ -int HelloRepl1_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) -{ +int HelloRepl1_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); ValkeyModule_AutoMemory(ctx); @@ -253,21 +237,21 @@ int HelloRepl1_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in * ECHO c foo * EXEC */ - ValkeyModule_Replicate(ctx,"ECHO","c","foo"); + ValkeyModule_Replicate(ctx, "ECHO", "c", "foo"); /* Using the "!" modifier we replicate the command if it * modified the dataset in some way. */ - ValkeyModule_Call(ctx,"INCR","c!","foo"); - ValkeyModule_Call(ctx,"INCR","c!","bar"); + ValkeyModule_Call(ctx, "INCR", "c!", "foo"); + ValkeyModule_Call(ctx, "INCR", "c!", "bar"); - ValkeyModule_ReplyWithLongLong(ctx,0); + ValkeyModule_ReplyWithLongLong(ctx, 0); return VALKEYMODULE_OK; } /* Another command to show replication. 
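The HELLO.REPL1 hunk above mixes two propagation styles, and HELLO.REPL2 just below uses a third, ValkeyModule_ReplicateVerbatim(). A compact sketch of the first two, with an invented command name and counter key: the "!" modifier lets Call() propagate the effect command itself when it changed the dataset, while Replicate() queues an arbitrary command to be wrapped in the same MULTI/EXEC.

static int ReplDemo_Command(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv);
    VALKEYMODULE_NOT_USED(argc);
    ValkeyModule_AutoMemory(ctx);

    /* Explicitly queued for replication, regardless of what this command does. */
    ValkeyModule_Replicate(ctx, "ECHO", "c", "repl-demo-marker");

    /* "!" = replicate the INCR itself if it modified the dataset. */
    ValkeyModuleCallReply *reply = ValkeyModule_Call(ctx, "INCR", "c!", "repl_demo_counter");
    ValkeyModule_FreeCallReply(reply);

    return ValkeyModule_ReplyWithLongLong(ctx, 0);
}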
In this case, we call * ValkeyModule_ReplicateVerbatim() to mean we want just the command to be - * propagated to slaves / AOF exactly as it was called by the user. + * propagated to replicas / AOF exactly as it was called by the user. * * This command also shows how to work with string objects. * It takes a list, and increments all the elements (that must have @@ -279,26 +263,25 @@ int HelloRepl2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in if (argc != 2) return ValkeyModule_WrongArity(ctx); ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); if (ValkeyModule_KeyType(key) != VALKEYMODULE_KEYTYPE_LIST) - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); size_t listlen = ValkeyModule_ValueLength(key); long long sum = 0; /* Rotate and increment. */ - while(listlen--) { - ValkeyModuleString *ele = ValkeyModule_ListPop(key,VALKEYMODULE_LIST_TAIL); + while (listlen--) { + ValkeyModuleString *ele = ValkeyModule_ListPop(key, VALKEYMODULE_LIST_TAIL); long long val; - if (ValkeyModule_StringToLongLong(ele,&val) != VALKEYMODULE_OK) val = 0; + if (ValkeyModule_StringToLongLong(ele, &val) != VALKEYMODULE_OK) val = 0; val++; sum += val; - ValkeyModuleString *newele = ValkeyModule_CreateStringFromLongLong(ctx,val); - ValkeyModule_ListPush(key,VALKEYMODULE_LIST_HEAD,newele); + ValkeyModuleString *newele = ValkeyModule_CreateStringFromLongLong(ctx, val); + ValkeyModule_ListPush(key, VALKEYMODULE_LIST_HEAD, newele); } - ValkeyModule_ReplyWithLongLong(ctx,sum); + ValkeyModule_ReplyWithLongLong(ctx, sum); ValkeyModule_ReplicateVerbatim(ctx); return VALKEYMODULE_OK; } @@ -314,20 +297,17 @@ int HelloRepl2_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in int HelloToggleCase_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { if (argc != 2) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int keytype = ValkeyModule_KeyType(key); - if (keytype != VALKEYMODULE_KEYTYPE_STRING && - keytype != VALKEYMODULE_KEYTYPE_EMPTY) - { + if (keytype != VALKEYMODULE_KEYTYPE_STRING && keytype != VALKEYMODULE_KEYTYPE_EMPTY) { ValkeyModule_CloseKey(key); - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } if (keytype == VALKEYMODULE_KEYTYPE_STRING) { size_t len, j; - char *s = ValkeyModule_StringDMA(key,&len,VALKEYMODULE_WRITE); + char *s = ValkeyModule_StringDMA(key, &len, VALKEYMODULE_WRITE); for (j = 0; j < len; j++) { if (isupper(s[j])) { s[j] = tolower(s[j]); @@ -338,7 +318,7 @@ int HelloToggleCase_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg } ValkeyModule_CloseKey(key); - ValkeyModule_ReplyWithSimpleString(ctx,"OK"); + ValkeyModule_ReplyWithSimpleString(ctx, "OK"); ValkeyModule_ReplicateVerbatim(ctx); return VALKEYMODULE_OK; } @@ -353,17 +333,16 @@ int HelloMoreExpire_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg mstime_t addms, expire; - if (ValkeyModule_StringToLongLong(argv[2],&addms) != VALKEYMODULE_OK) - return 
ValkeyModule_ReplyWithError(ctx,"ERR invalid expire time"); + if (ValkeyModule_StringToLongLong(argv[2], &addms) != VALKEYMODULE_OK) + return ValkeyModule_ReplyWithError(ctx, "ERR invalid expire time"); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); expire = ValkeyModule_GetExpire(key); if (expire != VALKEYMODULE_NO_EXPIRE) { expire += addms; - ValkeyModule_SetExpire(key,expire); + ValkeyModule_SetExpire(key, expire); } - return ValkeyModule_ReplyWithSimpleString(ctx,"OK"); + return ValkeyModule_ReplyWithSimpleString(ctx, "OK"); } /* HELLO.ZSUMRANGE key startscore endscore @@ -376,36 +355,34 @@ int HelloZsumRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv double score_start, score_end; if (argc != 4) return ValkeyModule_WrongArity(ctx); - if (ValkeyModule_StringToDouble(argv[2],&score_start) != VALKEYMODULE_OK || - ValkeyModule_StringToDouble(argv[3],&score_end) != VALKEYMODULE_OK) - { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid range"); + if (ValkeyModule_StringToDouble(argv[2], &score_start) != VALKEYMODULE_OK || + ValkeyModule_StringToDouble(argv[3], &score_end) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid range"); } - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); if (ValkeyModule_KeyType(key) != VALKEYMODULE_KEYTYPE_ZSET) { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } double scoresum_a = 0; double scoresum_b = 0; - ValkeyModule_ZsetFirstInScoreRange(key,score_start,score_end,0,0); - while(!ValkeyModule_ZsetRangeEndReached(key)) { + ValkeyModule_ZsetFirstInScoreRange(key, score_start, score_end, 0, 0); + while (!ValkeyModule_ZsetRangeEndReached(key)) { double score; - ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key,&score); - ValkeyModule_FreeString(ctx,ele); + ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key, &score); + ValkeyModule_FreeString(ctx, ele); scoresum_a += score; ValkeyModule_ZsetRangeNext(key); } ValkeyModule_ZsetRangeStop(key); - ValkeyModule_ZsetLastInScoreRange(key,score_start,score_end,0,0); - while(!ValkeyModule_ZsetRangeEndReached(key)) { + ValkeyModule_ZsetLastInScoreRange(key, score_start, score_end, 0, 0); + while (!ValkeyModule_ZsetRangeEndReached(key)) { double score; - ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key,&score); - ValkeyModule_FreeString(ctx,ele); + ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key, &score); + ValkeyModule_FreeString(ctx, ele); scoresum_b += score; ValkeyModule_ZsetRangePrev(key); } @@ -414,9 +391,9 @@ int HelloZsumRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv ValkeyModule_CloseKey(key); - ValkeyModule_ReplyWithArray(ctx,2); - ValkeyModule_ReplyWithDouble(ctx,scoresum_a); - ValkeyModule_ReplyWithDouble(ctx,scoresum_b); + ValkeyModule_ReplyWithArray(ctx, 2); + ValkeyModule_ReplyWithDouble(ctx, scoresum_a); + ValkeyModule_ReplyWithDouble(ctx, scoresum_b); return VALKEYMODULE_OK; } @@ -432,28 +409,27 @@ int HelloLexRange_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, if (argc != 6) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = 
ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); if (ValkeyModule_KeyType(key) != VALKEYMODULE_KEYTYPE_ZSET) { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } - if (ValkeyModule_ZsetFirstInLexRange(key,argv[2],argv[3]) != VALKEYMODULE_OK) { - return ValkeyModule_ReplyWithError(ctx,"invalid range"); + if (ValkeyModule_ZsetFirstInLexRange(key, argv[2], argv[3]) != VALKEYMODULE_OK) { + return ValkeyModule_ReplyWithError(ctx, "invalid range"); } int arraylen = 0; - ValkeyModule_ReplyWithArray(ctx,VALKEYMODULE_POSTPONED_LEN); - while(!ValkeyModule_ZsetRangeEndReached(key)) { + ValkeyModule_ReplyWithArray(ctx, VALKEYMODULE_POSTPONED_LEN); + while (!ValkeyModule_ZsetRangeEndReached(key)) { double score; - ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key,&score); - ValkeyModule_ReplyWithString(ctx,ele); - ValkeyModule_FreeString(ctx,ele); + ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key, &score); + ValkeyModule_ReplyWithString(ctx, ele); + ValkeyModule_FreeString(ctx, ele); ValkeyModule_ZsetRangeNext(key); arraylen++; } ValkeyModule_ZsetRangeStop(key); - ValkeyModule_ReplySetArrayLength(ctx,arraylen); + ValkeyModule_ReplySetArrayLength(ctx, arraylen); ValkeyModule_CloseKey(key); return VALKEYMODULE_OK; } @@ -469,22 +445,19 @@ int HelloHCopy_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, in ValkeyModule_AutoMemory(ctx); /* Use automatic memory management. */ if (argc != 4) return ValkeyModule_WrongArity(ctx); - ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx,argv[1], - VALKEYMODULE_READ|VALKEYMODULE_WRITE); + ValkeyModuleKey *key = ValkeyModule_OpenKey(ctx, argv[1], VALKEYMODULE_READ | VALKEYMODULE_WRITE); int type = ValkeyModule_KeyType(key); - if (type != VALKEYMODULE_KEYTYPE_HASH && - type != VALKEYMODULE_KEYTYPE_EMPTY) - { - return ValkeyModule_ReplyWithError(ctx,VALKEYMODULE_ERRORMSG_WRONGTYPE); + if (type != VALKEYMODULE_KEYTYPE_HASH && type != VALKEYMODULE_KEYTYPE_EMPTY) { + return ValkeyModule_ReplyWithError(ctx, VALKEYMODULE_ERRORMSG_WRONGTYPE); } /* Get the old field value. */ ValkeyModuleString *oldval; - ValkeyModule_HashGet(key,VALKEYMODULE_HASH_NONE,argv[2],&oldval,NULL); + ValkeyModule_HashGet(key, VALKEYMODULE_HASH_NONE, argv[2], &oldval, NULL); if (oldval) { - ValkeyModule_HashSet(key,VALKEYMODULE_HASH_NONE,argv[3],oldval,NULL); + ValkeyModule_HashSet(key, VALKEYMODULE_HASH_NONE, argv[3], oldval, NULL); } - ValkeyModule_ReplyWithLongLong(ctx,oldval != NULL); + ValkeyModule_ReplyWithLongLong(ctx, oldval != NULL); return VALKEYMODULE_OK; } @@ -512,9 +485,8 @@ int HelloLeftPad_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, if (argc != 4) return ValkeyModule_WrongArity(ctx); - if ((ValkeyModule_StringToLongLong(argv[2],&padlen) != VALKEYMODULE_OK) || - (padlen< 0)) { - return ValkeyModule_ReplyWithError(ctx,"ERR invalid padding length"); + if ((ValkeyModule_StringToLongLong(argv[2], &padlen) != VALKEYMODULE_OK) || (padlen < 0)) { + return ValkeyModule_ReplyWithError(ctx, "ERR invalid padding length"); } size_t strlen, chlen; const char *str = ValkeyModule_StringPtrLen(argv[1], &strlen); @@ -522,99 +494,91 @@ int HelloLeftPad_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, /* If the string is already larger than the target len, just return * the string itself. 
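HELLO.ZSUMRANGE and HELLO.LEXRANGE above drive the sorted-set iterator API: seed it with a score or lex range, walk it until the end flag, and always stop it. The same forward scan reduced to a score-summing helper sketch (name invented; key is assumed to be an already opened zset key):

static double SumScoreRange(ValkeyModuleCtx *ctx, ValkeyModuleKey *key, double min, double max) {
    double sum = 0;
    /* Trailing 0,0 = both ends of the range are inclusive. */
    ValkeyModule_ZsetFirstInScoreRange(key, min, max, 0, 0);
    while (!ValkeyModule_ZsetRangeEndReached(key)) {
        double score;
        ValkeyModuleString *ele = ValkeyModule_ZsetRangeCurrentElement(key, &score);
        ValkeyModule_FreeString(ctx, ele); /* only the score is needed here */
        sum += score;
        ValkeyModule_ZsetRangeNext(key);
    }
    ValkeyModule_ZsetRangeStop(key); /* release the iterator before closing or reseeding the key */
    return sum;
}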
*/ - if (strlen >= (size_t)padlen) - return ValkeyModule_ReplyWithString(ctx,argv[1]); + if (strlen >= (size_t)padlen) return ValkeyModule_ReplyWithString(ctx, argv[1]); /* Padding must be a single character in this simple implementation. */ - if (chlen != 1) - return ValkeyModule_ReplyWithError(ctx, - "ERR padding must be a single char"); + if (chlen != 1) return ValkeyModule_ReplyWithError(ctx, "ERR padding must be a single char"); /* Here we use our pool allocator, for our throw-away allocation. */ padlen -= strlen; - char *buf = ValkeyModule_PoolAlloc(ctx,padlen+strlen); + char *buf = ValkeyModule_PoolAlloc(ctx, padlen + strlen); for (long long j = 0; j < padlen; j++) buf[j] = *ch; - memcpy(buf+padlen,str,strlen); + memcpy(buf + padlen, str, strlen); - ValkeyModule_ReplyWithStringBuffer(ctx,buf,padlen+strlen); + ValkeyModule_ReplyWithStringBuffer(ctx, buf, padlen + strlen); return VALKEYMODULE_OK; } /* This function must be present on each module. It is used in order to * register the commands into the server. */ int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) { - if (ValkeyModule_Init(ctx,"helloworld",1,VALKEYMODULE_APIVER_1) - == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; + if (ValkeyModule_Init(ctx, "helloworld", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; /* Log the list of parameters passing loading the module. */ for (int j = 0; j < argc; j++) { - const char *s = ValkeyModule_StringPtrLen(argv[j],NULL); + const char *s = ValkeyModule_StringPtrLen(argv[j], NULL); printf("Module loaded with ARGV[%d] = %s\n", j, s); } - if (ValkeyModule_CreateCommand(ctx,"hello.simple", - HelloSimple_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.simple", HelloSimple_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.push.native", - HelloPushNative_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.push.native", HelloPushNative_ValkeyCommand, "write deny-oom", 1, 1, + 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.push.call", - HelloPushCall_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.push.call", HelloPushCall_ValkeyCommand, "write deny-oom", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.push.call2", - HelloPushCall2_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.push.call2", HelloPushCall2_ValkeyCommand, "write deny-oom", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.list.sum.len", - HelloListSumLen_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.list.sum.len", HelloListSumLen_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.list.splice", - HelloListSplice_ValkeyCommand,"write deny-oom",1,2,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.list.splice", HelloListSplice_ValkeyCommand, "write deny-oom", 1, 2, + 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.list.splice.auto", - HelloListSpliceAuto_ValkeyCommand, - "write deny-oom",1,2,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, 
"hello.list.splice.auto", HelloListSpliceAuto_ValkeyCommand, "write deny-oom", + 1, 2, 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.rand.array", - HelloRandArray_ValkeyCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.rand.array", HelloRandArray_ValkeyCommand, "readonly", 0, 0, 0) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.repl1", - HelloRepl1_ValkeyCommand,"write",0,0,0) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.repl1", HelloRepl1_ValkeyCommand, "write", 0, 0, 0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.repl2", - HelloRepl2_ValkeyCommand,"write",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.repl2", HelloRepl2_ValkeyCommand, "write", 1, 1, 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.toggle.case", - HelloToggleCase_ValkeyCommand,"write",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.toggle.case", HelloToggleCase_ValkeyCommand, "write", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.more.expire", - HelloMoreExpire_ValkeyCommand,"write",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.more.expire", HelloMoreExpire_ValkeyCommand, "write", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.zsumrange", - HelloZsumRange_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.zsumrange", HelloZsumRange_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.lexrange", - HelloLexRange_ValkeyCommand,"readonly",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.lexrange", HelloLexRange_ValkeyCommand, "readonly", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.hcopy", - HelloHCopy_ValkeyCommand,"write deny-oom",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.hcopy", HelloHCopy_ValkeyCommand, "write deny-oom", 1, 1, 1) == + VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - if (ValkeyModule_CreateCommand(ctx,"hello.leftpad", - HelloLeftPad_ValkeyCommand,"",1,1,1) == VALKEYMODULE_ERR) + if (ValkeyModule_CreateCommand(ctx, "hello.leftpad", HelloLeftPad_ValkeyCommand, "", 1, 1, 1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; return VALKEYMODULE_OK; diff --git a/src/networking.c b/src/networking.c index 39eaf8b17e..ecdeeb6588 100644 --- a/src/networking.c +++ b/src/networking.c @@ -28,7 +28,6 @@ */ #include "server.h" -#include "atomicvar.h" #include "cluster.h" #include "script.h" #include "fpconv_dtoa.h" @@ -37,12 +36,14 @@ #include #include #include +#include static void setProtocolError(const char *errstr, client *c); static void pauseClientsByClient(mstime_t end, int isPauseClientAll); int postponeClientRead(client *c); char *getClientSockname(client *c); int ProcessingEventsWhileBlocked = 0; /* See processEventsWhileBlocked(). */ +__thread sds thread_shared_qb = NULL; /* Return the size consumed from the allocator, for the specified SDS string, * including internal fragmentation. 
This function is used in order to compute @@ -103,14 +104,18 @@ static void clientSetDefaultAuth(client *c) { /* If the default user does not require authentication, the user is * directly authenticated. */ c->user = DefaultUser; - c->authenticated = (c->user->flags & USER_FLAG_NOPASS) && !(c->user->flags & USER_FLAG_DISABLED); + if ((c->user->flags & USER_FLAG_NOPASS) && !(c->user->flags & USER_FLAG_DISABLED)) { + c->flags |= CLIENT_AUTHENTICATED; + } else { + c->flags &= ~CLIENT_AUTHENTICATED; + } } int authRequired(client *c) { /* Check if the user is authenticated. This check is skipped in case * the default user is flagged as "nopass" and is active. */ - int auth_required = - (!(DefaultUser->flags & USER_FLAG_NOPASS) || (DefaultUser->flags & USER_FLAG_DISABLED)) && !c->authenticated; + int auth_required = (!(DefaultUser->flags & USER_FLAG_NOPASS) || (DefaultUser->flags & USER_FLAG_DISABLED)) && + !(c->flags & CLIENT_AUTHENTICATED); return auth_required; } @@ -129,8 +134,7 @@ client *createClient(connection *conn) { } c->buf = zmalloc_usable(PROTO_REPLY_CHUNK_BYTES, &c->buf_usable_size); selectDb(c, 0); - uint64_t client_id; - atomicGetIncr(server.next_client_id, client_id, 1); + uint64_t client_id = atomic_fetch_add_explicit(&server.next_client_id, 1, memory_order_relaxed); c->id = client_id; #ifdef LOG_REQ_RES reqresReset(c, 0); @@ -148,7 +152,7 @@ client *createClient(connection *conn) { c->ref_repl_buf_node = NULL; c->ref_block_pos = 0; c->qb_pos = 0; - c->querybuf = sdsempty(); + c->querybuf = NULL; c->querybuf_peak = 0; c->reqtype = 0; c->argc = 0; @@ -164,10 +168,10 @@ client *createClient(connection *conn) { c->sentlen = 0; c->flags = 0; c->slot = -1; - c->ctime = c->lastinteraction = server.unixtime; + c->ctime = c->last_interaction = server.unixtime; c->duration = 0; clientSetDefaultAuth(c); - c->replstate = REPL_STATE_NONE; + c->repl_state = REPL_STATE_NONE; c->repl_start_cmd_stream_on_ack = 0; c->reploff = 0; c->read_reploff = 0; @@ -176,10 +180,11 @@ client *createClient(connection *conn) { c->repl_ack_time = 0; c->repl_aof_off = 0; c->repl_last_partial_write = 0; - c->slave_listening_port = 0; - c->slave_addr = NULL; - c->slave_capa = SLAVE_CAPA_NONE; - c->slave_req = SLAVE_REQ_NONE; + c->replica_listening_port = 0; + c->replica_addr = NULL; + c->replica_version = 0; + c->replica_capa = REPLICA_CAPA_NONE; + c->replica_req = REPLICA_REQ_NONE; c->reply = listCreate(); c->deferred_reply_errors = NULL; c->reply_bytes = 0; @@ -241,10 +246,11 @@ void installClientWriteHandler(client *c) { * buffers can hold, then we'll really install the handler. */ void putClientInPendingWriteQueue(client *c) { /* Schedule the client to write the output buffers to the socket only - * if not already done and, for slaves, if the slave can actually receive + * if not already done and, for replicas, if the replica can actually receive * writes at this stage. */ if (!(c->flags & CLIENT_PENDING_WRITE) && - (c->replstate == REPL_STATE_NONE || (c->replstate == SLAVE_STATE_ONLINE && !c->repl_start_cmd_stream_on_ack))) { + (c->repl_state == REPL_STATE_NONE || + (c->repl_state == REPLICA_STATE_ONLINE && !c->repl_start_cmd_stream_on_ack))) { /* Here instead of installing the write handler, we just flag the * client and put it into a list of clients that have something * to write to the socket. This way before re-entering the event @@ -264,7 +270,7 @@ void putClientInPendingWriteQueue(client *c) { * loop so that when the socket is writable new data gets written. 
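The createClient() hunk above replaces the old atomicvar.h wrapper (atomicGetIncr) with C11 <stdatomic.h> for allocating client ids. A stand-alone illustration of the same fetch-and-add pattern; the variable below is local to the sketch, not the real server.next_client_id field:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t next_client_id = 1;

static uint64_t allocateClientId(void) {
    /* Relaxed ordering is enough: callers only need unique ids, not any
     * ordering with respect to other memory operations. */
    return atomic_fetch_add_explicit(&next_client_id, 1, memory_order_relaxed);
}

int main(void) {
    uint64_t a = allocateClientId();
    uint64_t b = allocateClientId();
    printf("ids: %llu, %llu\n", (unsigned long long)a, (unsigned long long)b);
    return 0;
}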
* * If the client should not receive new data, because it is a fake client - * (used to load AOF in memory), a master or because the setup of the write + * (used to load AOF in memory), a primary or because the setup of the write * handler failed, the function returns C_ERR. * * The function may return C_OK without actually installing the write @@ -272,7 +278,7 @@ void putClientInPendingWriteQueue(client *c) { * * 1) The event handler should already be installed since the output buffer * already contains something. - * 2) The client is a slave but not yet online, so we want to just accumulate + * 2) The client is a replica but not yet online, so we want to just accumulate * writes in the buffer but not actually sending them yet. * * Typically gets called every time a reply is built, before adding more @@ -290,9 +296,9 @@ int prepareClientToWrite(client *c) { * CLIENT_PUSHING handling: disables the reply silencing flags. */ if ((c->flags & (CLIENT_REPLY_OFF | CLIENT_REPLY_SKIP)) && !(c->flags & CLIENT_PUSHING)) return C_ERR; - /* Masters don't receive replies, unless CLIENT_MASTER_FORCE_REPLY flag + /* Primaries don't receive replies, unless CLIENT_PRIMARY_FORCE_REPLY flag * is set. */ - if ((c->flags & CLIENT_MASTER) && !(c->flags & CLIENT_MASTER_FORCE_REPLY)) return C_ERR; + if ((c->flags & CLIENT_PRIMARY) && !(c->flags & CLIENT_PRIMARY_FORCE_REPLY)) return C_ERR; if (!c->conn) return C_ERR; /* Fake client for AOF loading. */ @@ -427,7 +433,7 @@ void _addReplyToBufferOrList(client *c, const char *s, size_t len) { * replication link that caused a reply to be generated we'll simply disconnect it. * Note this is the simplest way to check a command added a response. Replication links are used to write data but * not for responses, so we should normally never get here on a replica client. */ - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { sds cmdname = c->lastcmd ? c->lastcmd->fullname : NULL; logInvalidUseAndFreeClientAsync(c, "Replica generated a reply to command '%s'", cmdname ? cmdname : ""); @@ -562,24 +568,24 @@ void afterErrorReply(client *c, const char *s, size_t len, int flags) { c->realcmd->failed_calls++; } - /* Sometimes it could be normal that a slave replies to a master with + /* Sometimes it could be normal that a replica replies to a primary with * an error and this function gets called. Actually the error will never - * be sent because addReply*() against master clients has no effect... + * be sent because addReply*() against primary clients has no effect... * A notable example is: * * EVAL 'redis.call("incr",KEYS[1]); redis.call("nonexisting")' 1 x * - * Where the master must propagate the first change even if the second + * Where the primary must propagate the first change even if the second * will produce an error. However it is useful to log such events since * they are rare and may hint at errors in a script or a bug in the server. 
*/ int ctype = getClientType(c); - if (ctype == CLIENT_TYPE_MASTER || ctype == CLIENT_TYPE_SLAVE || c->id == CLIENT_ID_AOF) { + if (ctype == CLIENT_TYPE_PRIMARY || ctype == CLIENT_TYPE_REPLICA || c->id == CLIENT_ID_AOF) { char *to, *from; if (c->id == CLIENT_ID_AOF) { to = "AOF-loading-client"; from = "server"; - } else if (ctype == CLIENT_TYPE_MASTER) { + } else if (ctype == CLIENT_TYPE_PRIMARY) { to = "master"; from = "replica"; } else { @@ -594,16 +600,16 @@ void afterErrorReply(client *c, const char *s, size_t len, int flags) { "to its %s: '%.*s' after processing the command " "'%s'", from, to, (int)len, s, cmdname ? cmdname : ""); - if (ctype == CLIENT_TYPE_MASTER && server.repl_backlog && server.repl_backlog->histlen > 0) { + if (ctype == CLIENT_TYPE_PRIMARY && server.repl_backlog && server.repl_backlog->histlen > 0) { showLatestBacklog(); } server.stat_unexpected_error_replies++; /* Based off the propagation error behavior, check if we need to panic here. There * are currently two checked cases: - * * If this command was from our master and we are not a writable replica. + * * If this command was from our primary and we are not a writable replica. * * We are reading from an AOF file. */ - int panic_in_replicas = (ctype == CLIENT_TYPE_MASTER && server.repl_slave_ro) && + int panic_in_replicas = (ctype == CLIENT_TYPE_PRIMARY && server.repl_replica_ro) && (server.propagation_error_behavior == PROPAGATION_ERR_BEHAVIOR_PANIC || server.propagation_error_behavior == PROPAGATION_ERR_BEHAVIOR_PANIC_ON_REPLICAS); int panic_in_aof = @@ -765,7 +771,7 @@ void *addReplyDeferredLen(client *c) { * replication link that caused a reply to be generated we'll simply disconnect it. * Note this is the simplest way to check a command added a response. Replication links are used to write data but * not for responses, so we should normally never get here on a replica client. */ - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { sds cmdname = c->lastcmd ? c->lastcmd->fullname : NULL; logInvalidUseAndFreeClientAsync(c, "Replica generated a reply to command '%s'", cmdname ? cmdname : ""); @@ -1256,7 +1262,7 @@ void copyReplicaOutputBuffer(client *dst, client *src) { /* Return true if the specified client has pending reply buffers to write to * the socket. */ int clientHasPendingReplies(client *c) { - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { /* Replicas use global shared replication buffer instead of * private output buffer. */ serverAssert(c->bufpos == 0 && listLength(c->reply) == 0); @@ -1290,19 +1296,19 @@ void clientAcceptHandler(connection *conn) { * user what to do to fix it if needed. */ if (server.protected_mode && DefaultUser->flags & USER_FLAG_NOPASS) { if (connIsLocal(conn) != 1) { - char *err = "-DENIED Redis is running in protected mode because protected " + char *err = "-DENIED Running in protected mode because protected " "mode is enabled and no password is set for the default user. " "In this mode connections are only accepted from the loopback interface. 
" - "If you want to connect from external computers to Redis you " + "If you want to connect from external computers, you " "may adopt one of the following solutions: " "1) Just disable protected mode sending the command " "'CONFIG SET protected-mode no' from the loopback interface " - "by connecting to Redis from the same host the server is " - "running, however MAKE SURE Redis is not publicly accessible " + "by connecting from the same host the server is " + "running, however MAKE SURE it's not publicly accessible " "from internet if you do so. Use CONFIG REWRITE to make this " "change permanent. " "2) Alternatively you can just disable the protected mode by " - "editing the Redis configuration file, and setting the protected " + "editing the configuration file, and setting the protected " "mode option to 'no', and then restarting the server. " "3) If you started the server manually just for testing, restart " "it with the '--protected-mode no' option. " @@ -1414,29 +1420,29 @@ void freeClientArgv(client *c) { c->argv = NULL; } -/* Close all the slaves connections. This is useful in chained replication - * when we resync with our own master and want to force all our slaves to +/* Close all the replicas connections. This is useful in chained replication + * when we resync with our own primary and want to force all our replicas to * resync with us as well. */ -void disconnectSlaves(void) { +void disconnectReplicas(void) { listIter li; listNode *ln; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { freeClient((client *)ln->value); } } -/* Check if there is any other slave waiting dumping RDB finished expect me. +/* Check if there is any other replica waiting dumping RDB finished expect me. * This function is useful to judge current dumping RDB can be used for full * synchronization or not. */ -int anyOtherSlaveWaitRdb(client *except_me) { +int anyOtherReplicaWaitRdb(client *except_me) { listIter li; listNode *ln; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (slave != except_me && slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) { + client *replica = ln->value; + if (replica != except_me && replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_END) { return 1; } } @@ -1445,7 +1451,7 @@ int anyOtherSlaveWaitRdb(client *except_me) { /* Remove the specified client from global lists where the client could * be referenced, not including the Pub/Sub channels. - * This is used by freeClient() and replicationCacheMaster(). */ + * This is used by freeClient() and replicationCachePrimary(). */ void unlinkClient(client *c) { listNode *ln; @@ -1466,7 +1472,7 @@ void unlinkClient(client *c) { /* Check if this is a replica waiting for diskless replication (rdb pipe), * in which case it needs to be cleaned from that list */ - if (c->flags & CLIENT_SLAVE && c->replstate == SLAVE_STATE_WAIT_BGSAVE_END && server.rdb_pipe_conns) { + if (c->flags & CLIENT_REPLICA && c->repl_state == REPLICA_STATE_WAIT_BGSAVE_END && server.rdb_pipe_conns) { int i; for (i = 0; i < server.rdb_pipe_numconns; i++) { if (server.rdb_pipe_conns[i] == c->conn) { @@ -1514,7 +1520,7 @@ void unlinkClient(client *c) { void clearClientConnectionState(client *c) { listNode *ln; - /* MONITOR clients are also marked with CLIENT_SLAVE, we need to + /* MONITOR clients are also marked with CLIENT_REPLICA, we need to * distinguish between the two. 
*/ if (c->flags & CLIENT_MONITOR) { @@ -1522,10 +1528,10 @@ void clearClientConnectionState(client *c) { serverAssert(ln != NULL); listDelNode(server.monitors, ln); - c->flags &= ~(CLIENT_MONITOR | CLIENT_SLAVE); + c->flags &= ~(CLIENT_MONITOR | CLIENT_REPLICA); } - serverAssert(!(c->flags & (CLIENT_SLAVE | CLIENT_MASTER))); + serverAssert(!(c->flags & (CLIENT_REPLICA | CLIENT_PRIMARY))); if (c->flags & CLIENT_TRACKING) disableTracking(c); selectDb(c, 0); @@ -1580,7 +1586,7 @@ void freeClient(client *c) { /* If this client was scheduled for async freeing we need to remove it * from the queue. Note that we need to do this here, because later - * we may call replicationCacheMaster() and the client should already + * we may call replicationCachePrimary() and the client should already * be removed from the list of clients to free. */ if (c->flags & CLIENT_CLOSE_ASAP) { ln = listSearchKey(server.clients_to_close, c); @@ -1588,27 +1594,31 @@ void freeClient(client *c) { listDelNode(server.clients_to_close, ln); } - /* If it is our master that's being disconnected we should make sure + /* If it is our primary that's being disconnected we should make sure * to cache the state to try a partial resynchronization later. * * Note that before doing this we make sure that the client is not in * some unexpected state, by checking its flags. */ - if (server.master && c->flags & CLIENT_MASTER) { - serverLog(LL_NOTICE, "Connection with master lost."); + if (server.primary && c->flags & CLIENT_PRIMARY) { + serverLog(LL_NOTICE, "Connection with primary lost."); if (!(c->flags & (CLIENT_PROTOCOL_ERROR | CLIENT_BLOCKED))) { c->flags &= ~(CLIENT_CLOSE_ASAP | CLIENT_CLOSE_AFTER_REPLY); - replicationCacheMaster(c); + replicationCachePrimary(c); return; } } - /* Log link disconnection with slave */ - if (getClientType(c) == CLIENT_TYPE_SLAVE) { - serverLog(LL_NOTICE, "Connection with replica %s lost.", replicationGetSlaveName(c)); + /* Log link disconnection with replica */ + if (getClientType(c) == CLIENT_TYPE_REPLICA) { + serverLog(LL_NOTICE, "Connection with replica %s lost.", replicationGetReplicaName(c)); } /* Free the query buffer */ - sdsfree(c->querybuf); + if (c->querybuf && c->querybuf == thread_shared_qb) { + sdsclear(c->querybuf); + } else { + sdsfree(c->querybuf); + } c->querybuf = NULL; /* Deallocate structures used to block on blocking ops. */ @@ -1650,44 +1660,44 @@ void freeClient(client *c) { * places where active clients may be referenced. */ unlinkClient(c); - /* Master/slave cleanup Case 1: - * we lost the connection with a slave. */ - if (c->flags & CLIENT_SLAVE) { - /* If there is no any other slave waiting dumping RDB finished, the + /* Primary/replica cleanup Case 1: + * we lost the connection with a replica. */ + if (c->flags & CLIENT_REPLICA) { + /* If there is no any other replica waiting dumping RDB finished, the * current child process need not continue to dump RDB, then we kill it. * So child process won't use more memory, and we also can fork a new * child process asap to dump rdb for next full synchronization or bgsave. * But we also need to check if users enable 'save' RDB, if enable, we * should not remove directly since that means RDB is important for users * to keep data safe and we may delay configured 'save' for full sync. 
*/ - if (server.saveparamslen == 0 && c->replstate == SLAVE_STATE_WAIT_BGSAVE_END && + if (server.saveparamslen == 0 && c->repl_state == REPLICA_STATE_WAIT_BGSAVE_END && server.child_type == CHILD_TYPE_RDB && server.rdb_child_type == RDB_CHILD_TYPE_DISK && - anyOtherSlaveWaitRdb(c) == 0) { + anyOtherReplicaWaitRdb(c) == 0) { killRDBChild(); } - if (c->replstate == SLAVE_STATE_SEND_BULK) { + if (c->repl_state == REPLICA_STATE_SEND_BULK) { if (c->repldbfd != -1) close(c->repldbfd); if (c->replpreamble) sdsfree(c->replpreamble); } - list *l = (c->flags & CLIENT_MONITOR) ? server.monitors : server.slaves; + list *l = (c->flags & CLIENT_MONITOR) ? server.monitors : server.replicas; ln = listSearchKey(l, c); serverAssert(ln != NULL); listDelNode(l, ln); /* We need to remember the time when we started to have zero - * attached slaves, as after some time we'll free the replication + * attached replicas, as after some time we'll free the replication * backlog. */ - if (getClientType(c) == CLIENT_TYPE_SLAVE && listLength(server.slaves) == 0) - server.repl_no_slaves_since = server.unixtime; - refreshGoodSlavesCount(); + if (getClientType(c) == CLIENT_TYPE_REPLICA && listLength(server.replicas) == 0) + server.repl_no_replicas_since = server.unixtime; + refreshGoodReplicasCount(); /* Fire the replica change modules event. */ - if (c->replstate == SLAVE_STATE_ONLINE) + if (c->repl_state == REPLICA_STATE_ONLINE) moduleFireServerEvent(VALKEYMODULE_EVENT_REPLICA_CHANGE, VALKEYMODULE_SUBEVENT_REPLICA_CHANGE_OFFLINE, NULL); } - /* Master/slave cleanup Case 2: - * we lost the connection with the master. */ - if (c->flags & CLIENT_MASTER) replicationHandleMasterDisconnection(); + /* Primary/replica cleanup Case 2: + * we lost the connection with the primary. */ + if (c->flags & CLIENT_PRIMARY) replicationHandlePrimaryDisconnection(); /* Remove client from memory usage buckets */ if (c->mem_usage_bucket) { @@ -1703,7 +1713,7 @@ void freeClient(client *c) { freeClientMultiState(c); sdsfree(c->peerid); sdsfree(c->sockname); - sdsfree(c->slave_addr); + sdsfree(c->replica_addr); zfree(c); } @@ -1884,7 +1894,7 @@ static int _writevToClient(client *c, ssize_t *nwritten) { * to client. */ int _writeToClient(client *c, ssize_t *nwritten) { *nwritten = 0; - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { serverAssert(c->bufpos == 0 && listLength(c->reply) == 0); replBufBlock *o = listNodeValue(c->ref_repl_buf_node); @@ -1943,7 +1953,7 @@ int _writeToClient(client *c, ssize_t *nwritten) { * thread safe. */ int writeToClient(client *c, int handler_installed) { /* Update total number of writes on server */ - atomicIncr(server.stat_total_writes_processed, 1); + atomic_fetch_add_explicit(&server.stat_total_writes_processed, 1, memory_order_relaxed); ssize_t nwritten = 0, totwritten = 0; @@ -1961,17 +1971,17 @@ int writeToClient(client *c, int handler_installed) { * just deliver as much data as it is possible to deliver. 
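An aside on the atomics change running through this hunk: the old atomicIncr()/atomicGetWithSync() style macros are replaced with C11 <stdatomic.h> operations, and the hot statistics counters use memory_order_relaxed because only the count itself has to be exact, not its ordering relative to other memory. Below is a minimal standalone sketch of the same pattern; the counter here is a local stand-in, not server.stat_total_writes_processed itself.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for a server statistic bumped from several threads. Only the
 * final count matters, so a relaxed fetch-add is enough. */
static _Atomic unsigned long long writes_processed;

static void on_write_processed(void) {
    atomic_fetch_add_explicit(&writes_processed, 1, memory_order_relaxed);
}

static unsigned long long writes_processed_snapshot(void) {
    /* Readers (e.g. an INFO-style report) take a relaxed snapshot too. */
    return atomic_load_explicit(&writes_processed, memory_order_relaxed);
}

int main(void) {
    on_write_processed();
    on_write_processed();
    printf("writes processed: %llu\n", writes_processed_snapshot());
    return 0;
}

Relaxed increments keep the per-event cost to a single lock-free read-modify-write on mainstream platforms.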
* * Moreover, we also send as much as possible if the client is - * a slave or a monitor (otherwise, on high-speed traffic, the + * a replica or a monitor (otherwise, on high-speed traffic, the * replication/output buffer will grow indefinitely) */ if (totwritten > NET_MAX_WRITES_PER_EVENT && - (server.maxmemory == 0 || zmalloc_used_memory() < server.maxmemory) && !(c->flags & CLIENT_SLAVE)) + (server.maxmemory == 0 || zmalloc_used_memory() < server.maxmemory) && !(c->flags & CLIENT_REPLICA)) break; } - if (getClientType(c) == CLIENT_TYPE_SLAVE) { - atomicIncr(server.stat_net_repl_output_bytes, totwritten); + if (getClientType(c) == CLIENT_TYPE_REPLICA) { + atomic_fetch_add_explicit(&server.stat_net_repl_output_bytes, totwritten, memory_order_relaxed); } else { - atomicIncr(server.stat_net_output_bytes, totwritten); + atomic_fetch_add_explicit(&server.stat_net_output_bytes, totwritten, memory_order_relaxed); } c->net_output_bytes += totwritten; @@ -1983,11 +1993,11 @@ int writeToClient(client *c, int handler_installed) { } } if (totwritten > 0) { - /* For clients representing masters we don't count sending data + /* For clients representing primaries we don't count sending data * as an interaction, since we always send REPLCONF ACK commands * that take some time to just fill the socket output buffer. * We just rely on data / pings received for timeout detection. */ - if (!(c->flags & CLIENT_MASTER)) c->lastinteraction = server.unixtime; + if (!(c->flags & CLIENT_PRIMARY)) c->last_interaction = server.unixtime; } if (!clientHasPendingReplies(c)) { c->sentlen = 0; @@ -2063,7 +2073,7 @@ void resetClient(client *c) { c->multibulklen = 0; c->bulklen = -1; c->slot = -1; - c->flags &= ~(CLIENT_EXECUTING_COMMAND | CLIENT_PREREPL_DONE); + c->flags &= ~(CLIENT_EXECUTING_COMMAND | CLIENT_REPLICATION_DONE); /* Make sure the duration has been recorded to some command. */ serverAssert(c->duration == 0); @@ -2093,6 +2103,48 @@ void resetClient(client *c) { } } +/* Initializes the shared query buffer to a new sds with the default capacity */ +void initSharedQueryBuf(void) { + thread_shared_qb = sdsnewlen(NULL, PROTO_IOBUF_LEN); + sdsclear(thread_shared_qb); +} + +/* Resets the shared query buffer used by the given client. + * If any data remained in the buffer, the client will take ownership of the buffer + * and a new empty buffer will be allocated for the shared buffer. */ +void resetSharedQueryBuf(client *c) { + serverAssert(c->querybuf == thread_shared_qb); + size_t remaining = sdslen(c->querybuf) - c->qb_pos; + + if (remaining > 0) { + /* Let the client take ownership of the shared buffer. */ + initSharedQueryBuf(); + return; + } + + c->querybuf = NULL; + sdsclear(thread_shared_qb); + c->qb_pos = 0; +} + +/* Trims the client query buffer to the current position. */ +void trimClientQueryBuffer(client *c) { + if (c->querybuf == thread_shared_qb) { + resetSharedQueryBuf(c); + } + + if (c->querybuf == NULL) { + return; + } + + serverAssert(c->qb_pos <= sdslen(c->querybuf)); + + if (c->qb_pos > 0) { + sdsrange(c->querybuf, c->qb_pos, -1); + c->qb_pos = 0; + } +} + /* This function is used when we want to re-enter the event loop but there * is the risk that the client we are dealing with will be freed in some * way. This happens for instance in: @@ -2164,22 +2216,22 @@ int processInlineBuffer(client *c) { return C_ERR; } - /* Newline from slaves can be used to refresh the last ACK time. 
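For context on the thread_shared_qb machinery introduced above: instead of every connected client holding a private query buffer, one per-thread buffer is lent to whichever client is currently being read, and the client only takes ownership when parsing leaves unread bytes behind, in which case a fresh shared buffer is allocated for the thread. The following is a deliberately simplified toy of that hand-off using plain malloc'd memory rather than the server's sds strings; all names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of the shared query buffer: one reusable buffer per thread,
 * borrowed by whichever client is currently being read. */
typedef struct {
    char *buf;   /* either the borrowed shared buffer or a privately owned one */
    size_t len;  /* bytes currently held */
    size_t pos;  /* bytes already consumed by the parser */
} toy_client;

static char *shared_qb;
static const size_t SHARED_QB_CAP = 16 * 1024;

static void init_shared_qb(void) {
    shared_qb = malloc(SHARED_QB_CAP);
}

/* Done reading for this client: if unparsed bytes remain, the client keeps
 * the buffer and a fresh shared buffer is allocated; otherwise the shared
 * buffer is handed back for the next client. */
static void release_shared_qb(toy_client *c) {
    if (c->buf != shared_qb) return;       /* client already owns a private buffer */
    if (c->len - c->pos > 0) {
        init_shared_qb();                  /* ownership transferred to the client */
    } else {
        c->buf = NULL;
        c->len = c->pos = 0;
    }
}

int main(void) {
    init_shared_qb();
    toy_client c = {0};
    c.buf = shared_qb;                     /* borrow for this read */
    memcpy(c.buf, "PING\r\n", 6);
    c.len = 6;
    c.pos = 6;                             /* parser consumed the whole command */
    release_shared_qb(&c);
    printf("client kept the buffer: %s\n", c.buf ? "yes" : "no");
    free(shared_qb);
    return 0;
}

In the patch itself the same decision is made by resetSharedQueryBuf(), and readQueryFromClient() returns the buffer at its done: label when nothing is left in it.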
- * This is useful for a slave to ping back while loading a big + /* Newline from replicas can be used to refresh the last ACK time. + * This is useful for a replica to ping back while loading a big * RDB file. */ - if (querylen == 0 && getClientType(c) == CLIENT_TYPE_SLAVE) c->repl_ack_time = server.unixtime; + if (querylen == 0 && getClientType(c) == CLIENT_TYPE_REPLICA) c->repl_ack_time = server.unixtime; - /* Masters should never send us inline protocol to run actual + /* Primaries should never send us inline protocol to run actual * commands. If this happens, it is likely due to a bug in the server where * we got some desynchronization in the protocol, for example * because of a PSYNC gone bad. * - * However there is an exception: masters may send us just a newline + * However there is an exception: primaries may send us just a newline * to keep the connection active. */ - if (querylen != 0 && c->flags & CLIENT_MASTER) { + if (querylen != 0 && c->flags & CLIENT_PRIMARY) { sdsfreesplitres(argv, argc); - serverLog(LL_WARNING, "WARNING: Receiving inline protocol from master, master stream corruption? Closing the " - "master connection and discarding the cached master."); + serverLog(LL_WARNING, "WARNING: Receiving inline protocol from primary, primary stream corruption? Closing the " + "primary connection and discarding the cached primary."); setProtocolError("Master using the inline protocol. Desync?", c); return C_ERR; } @@ -2210,7 +2262,7 @@ int processInlineBuffer(client *c) { * CLIENT_PROTOCOL_ERROR. */ #define PROTO_DUMP_LEN 128 static void setProtocolError(const char *errstr, client *c) { - if (server.verbosity <= LL_VERBOSE || c->flags & CLIENT_MASTER) { + if (server.verbosity <= LL_VERBOSE || c->flags & CLIENT_PRIMARY) { sds client = catClientInfoString(sdsempty(), c); /* Sample some protocol to given an idea about what was inside. */ @@ -2231,7 +2283,7 @@ static void setProtocolError(const char *errstr, client *c) { } /* Log all the client and protocol info. */ - int loglevel = (c->flags & CLIENT_MASTER) ? LL_WARNING : LL_VERBOSE; + int loglevel = (c->flags & CLIENT_PRIMARY) ? LL_WARNING : LL_VERBOSE; serverLog(loglevel, "Protocol error (%s) from client: %s. %s", errstr, client, buf); sdsfree(client); } @@ -2322,7 +2374,7 @@ int processMultibulkBuffer(client *c) { } ok = string2ll(c->querybuf + c->qb_pos + 1, newline - (c->querybuf + c->qb_pos + 1), &ll); - if (!ok || ll < 0 || (!(c->flags & CLIENT_MASTER) && ll > server.proto_max_bulk_len)) { + if (!ok || ll < 0 || (!(c->flags & CLIENT_PRIMARY) && ll > server.proto_max_bulk_len)) { addReplyError(c, "Protocol error: invalid bulk length"); setProtocolError("invalid bulk length", c); return C_ERR; @@ -2333,8 +2385,8 @@ int processMultibulkBuffer(client *c) { } c->qb_pos = newline - c->querybuf + 2; - if (!(c->flags & CLIENT_MASTER) && ll >= PROTO_MBULK_BIG_ARG) { - /* When the client is not a master client (because master + if (!(c->flags & CLIENT_PRIMARY) && ll >= PROTO_MBULK_BIG_ARG) { + /* When the client is not a primary client (because primary * client's querybuf can only be trimmed after data applied * and sent to replicas). * @@ -2348,6 +2400,10 @@ int processMultibulkBuffer(client *c) { * ll+2, trimming querybuf is just a waste of time, because * at this time the querybuf contains not only our bulk. */ if (sdslen(c->querybuf) - c->qb_pos <= (size_t)ll + 2) { + if (c->querybuf == thread_shared_qb) { + /* Let the client take the ownership of the shared buffer. 
*/ + initSharedQueryBuf(); + } sdsrange(c->querybuf, c->qb_pos, -1); c->qb_pos = 0; /* Hint the sds library about the amount of bytes this string is @@ -2372,10 +2428,10 @@ int processMultibulkBuffer(client *c) { c->argv = zrealloc(c->argv, sizeof(robj *) * c->argv_len); } - /* Optimization: if a non-master client's buffer contains JUST our bulk element + /* Optimization: if a non-primary client's buffer contains JUST our bulk element * instead of creating a new object by *copying* the sds we * just use the current sds string. */ - if (!(c->flags & CLIENT_MASTER) && c->qb_pos == 0 && c->bulklen >= PROTO_MBULK_BIG_ARG && + if (!(c->flags & CLIENT_PRIMARY) && c->qb_pos == 0 && c->bulklen >= PROTO_MBULK_BIG_ARG && sdslen(c->querybuf) == (size_t)(c->bulklen + 2)) { c->argv[c->argc++] = createObject(OBJ_STRING, c->querybuf); c->argv_len_sum += c->bulklen; @@ -2404,8 +2460,8 @@ int processMultibulkBuffer(client *c) { /* Perform necessary tasks after a command was executed: * * 1. The client is reset unless there are reasons to avoid doing it. - * 2. In the case of master clients, the replication offset is updated. - * 3. Propagate commands we got from our master to replicas down the line. */ + * 2. In the case of primary clients, the replication offset is updated. + * 3. Propagate commands we got from our primary to replicas down the line. */ void commandProcessed(client *c) { /* If client is blocked(including paused), just return avoid reset and replicate. * @@ -2420,21 +2476,21 @@ void commandProcessed(client *c) { resetClient(c); long long prev_offset = c->reploff; - if (c->flags & CLIENT_MASTER && !(c->flags & CLIENT_MULTI)) { - /* Update the applied replication offset of our master. */ + if (c->flags & CLIENT_PRIMARY && !(c->flags & CLIENT_MULTI)) { + /* Update the applied replication offset of our primary. */ c->reploff = c->read_reploff - sdslen(c->querybuf) + c->qb_pos; } - /* If the client is a master we need to compute the difference + /* If the client is a primary we need to compute the difference * between the applied offset before and after processing the buffer, * to understand how much of the replication stream was actually - * applied to the master state: this quantity, and its corresponding + * applied to the primary state: this quantity, and its corresponding * part of the replication stream, will be propagated to the * sub-replicas and to the replication backlog. */ - if (c->flags & CLIENT_MASTER) { + if (c->flags & CLIENT_PRIMARY) { long long applied = c->reploff - prev_offset; if (applied) { - replicationFeedStreamFromMasterStream(c->querybuf + c->repl_applied, applied); + replicationFeedStreamFromPrimaryStream(c->querybuf + c->repl_applied, applied); c->repl_applied += applied; } } @@ -2468,8 +2524,8 @@ int processCommandAndResetClient(client *c) { * is dead and will stop reading from its buffer. */ server.current_client = old_client; - /* performEvictions may flush slave output buffers. This may - * result in a slave, that may be the active client, to be + /* performEvictions may flush replica output buffers. This may + * result in a replica, that may be the active client, to be * freed. */ return deadclient ? C_ERR : C_OK; } @@ -2492,7 +2548,7 @@ int processPendingCommandAndInputBuffer(client *c) { /* Now process client if it has more data in it's buffer. * - * Note: when a master client steps into this function, + * Note: when a primary client steps into this function, * it can always satisfy this condition, because its querybuf * contains data not applied. 
*/ if (c->querybuf && sdslen(c->querybuf) > 0) { @@ -2508,7 +2564,7 @@ int processPendingCommandAndInputBuffer(client *c) { * return C_ERR in case the client was freed during the processing */ int processInputBuffer(client *c) { /* Keep processing while there is something in the input buffer */ - while (c->qb_pos < sdslen(c->querybuf)) { + while (c->querybuf && c->qb_pos < sdslen(c->querybuf)) { /* Immediately abort if the client is in the middle of something. */ if (c->flags & CLIENT_BLOCKED) break; @@ -2516,11 +2572,11 @@ int processInputBuffer(client *c) { * commands to execute in c->argv. */ if (c->flags & CLIENT_PENDING_COMMAND) break; - /* Don't process input from the master while there is a busy script - * condition on the slave. We want just to accumulate the replication + /* Don't process input from the primary while there is a busy script + * condition on the replica. We want just to accumulate the replication * stream (instead of replying -BUSY like we do with other clients) and * later resume the processing. */ - if (isInsideYieldingLongCommand() && c->flags & CLIENT_MASTER) break; + if (isInsideYieldingLongCommand() && c->flags & CLIENT_PRIMARY) break; /* CLIENT_CLOSE_AFTER_REPLY closes the connection once the reply is * written to the client. Make sure to not let the reply grow after @@ -2559,6 +2615,13 @@ int processInputBuffer(client *c) { break; } + if (c->querybuf == thread_shared_qb) { + /* Before processing the command, reset the shared query buffer to its default state. + * This avoids unintentionally modifying the shared qb during processCommand as we may use + * the shared qb for other clients during processEventsWhileBlocked */ + resetSharedQueryBuf(c); + } + /* We are finally ready to execute the command. */ if (processCommandAndResetClient(c) == C_ERR) { /* If the client is no longer valid, we avoid exiting this @@ -2569,15 +2632,15 @@ int processInputBuffer(client *c) { } } - if (c->flags & CLIENT_MASTER) { - /* If the client is a master, trim the querybuf to repl_applied, - * since master client is very special, its querybuf not only + if (c->flags & CLIENT_PRIMARY) { + /* If the client is a primary, trim the querybuf to repl_applied, + * since primary client is very special, its querybuf not only * used to parse command, but also proxy to sub-replicas. * * Here are some scenarios we cannot trim to qb_pos: - * 1. we don't receive complete command from master - * 2. master client blocked cause of client pause - * 3. io threads operate read, master client flagged with CLIENT_PENDING_COMMAND + * 1. we don't receive complete command from primary + * 2. primary client blocked cause of client pause + * 3. 
io threads operate read, primary client flagged with CLIENT_PENDING_COMMAND * * In these scenarios, qb_pos points to the part of the current command * or the beginning of next command, and the current command is not applied yet, @@ -2587,10 +2650,8 @@ int processInputBuffer(client *c) { c->qb_pos -= c->repl_applied; c->repl_applied = 0; } - } else if (c->qb_pos) { - /* Trim to pos */ - sdsrange(c->querybuf, c->qb_pos, -1); - c->qb_pos = 0; + } else { + trimClientQueryBuffer(c); } /* Update client memory usage after processing the query buffer, this is @@ -2611,30 +2672,37 @@ void readQueryFromClient(connection *conn) { if (postponeClientRead(c)) return; /* Update total number of reads on server */ - atomicIncr(server.stat_total_reads_processed, 1); + atomic_fetch_add_explicit(&server.stat_total_reads_processed, 1, memory_order_relaxed); readlen = PROTO_IOBUF_LEN; + qblen = c->querybuf ? sdslen(c->querybuf) : 0; /* If this is a multi bulk request, and we are processing a bulk reply * that is large enough, try to maximize the probability that the query * buffer contains exactly the SDS string representing the object, even * at the risk of requiring more read(2) calls. This way the function * processMultiBulkBuffer() can avoid copying buffers to create the * robj representing the argument. */ + if (c->reqtype == PROTO_REQ_MULTIBULK && c->multibulklen && c->bulklen != -1 && c->bulklen >= PROTO_MBULK_BIG_ARG) { - ssize_t remaining = (size_t)(c->bulklen + 2) - (sdslen(c->querybuf) - c->qb_pos); + ssize_t remaining = (size_t)(c->bulklen + 2) - (qblen - c->qb_pos); big_arg = 1; /* Note that the 'remaining' variable may be zero in some edge case, * for example once we resume a blocked client after CLIENT PAUSE. */ if (remaining > 0) readlen = remaining; - /* Master client needs expand the readlen when meet BIG_ARG(see #9100), + /* Primary client needs expand the readlen when meet BIG_ARG(see #9100), * but doesn't need align to the next arg, we can read more data. */ - if (c->flags & CLIENT_MASTER && readlen < PROTO_IOBUF_LEN) readlen = PROTO_IOBUF_LEN; + if (c->flags & CLIENT_PRIMARY && readlen < PROTO_IOBUF_LEN) readlen = PROTO_IOBUF_LEN; } - qblen = sdslen(c->querybuf); - if (!(c->flags & CLIENT_MASTER) && // master client's querybuf can grow greedy. + if (c->querybuf == NULL) { + serverAssert(sdslen(thread_shared_qb) == 0); + c->querybuf = big_arg ? sdsempty() : thread_shared_qb; + qblen = sdslen(c->querybuf); + } + + if (!(c->flags & CLIENT_PRIMARY) && // primary client's querybuf can grow greedy. 
(big_arg || sdsalloc(c->querybuf) < PROTO_IOBUF_LEN)) { /* When reading a BIG_ARG we won't be reading more than that one arg * into the query buffer, so we don't need to pre-allocate more than we @@ -2654,7 +2722,7 @@ void readQueryFromClient(connection *conn) { nread = connRead(c->conn, c->querybuf + qblen, readlen); if (nread == -1) { if (connGetState(conn) == CONN_STATE_CONNECTED) { - return; + goto done; } else { serverLog(LL_VERBOSE, "Reading from client: %s", connGetLastError(c->conn)); freeClientAsync(c); @@ -2674,16 +2742,16 @@ void readQueryFromClient(connection *conn) { qblen = sdslen(c->querybuf); if (c->querybuf_peak < qblen) c->querybuf_peak = qblen; - c->lastinteraction = server.unixtime; - if (c->flags & CLIENT_MASTER) { + c->last_interaction = server.unixtime; + if (c->flags & CLIENT_PRIMARY) { c->read_reploff += nread; - atomicIncr(server.stat_net_repl_input_bytes, nread); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes, nread, memory_order_relaxed); } else { - atomicIncr(server.stat_net_input_bytes, nread); + atomic_fetch_add_explicit(&server.stat_net_input_bytes, nread, memory_order_relaxed); } c->net_input_bytes += nread; - if (!(c->flags & CLIENT_MASTER) && + if (!(c->flags & CLIENT_PRIMARY) && /* The commands cached in the MULTI/EXEC queue have not been executed yet, * so they are also considered a part of the query buffer in a broader sense. * @@ -2698,7 +2766,7 @@ void readQueryFromClient(connection *conn) { sdsfree(ci); sdsfree(bytes); freeClientAsync(c); - atomicIncr(server.stat_client_qbuf_limit_disconnections, 1); + atomic_fetch_add_explicit(&server.stat_client_qbuf_limit_disconnections, 1, memory_order_relaxed); goto done; } @@ -2707,6 +2775,10 @@ void readQueryFromClient(connection *conn) { if (processInputBuffer(c) == C_ERR) c = NULL; done: + if (c && c->querybuf == thread_shared_qb) { + sdsclear(thread_shared_qb); + c->querybuf = NULL; + } beforeNextClient(c); } @@ -2765,14 +2837,13 @@ sds catClientInfoString(sds s, client *client) { char flags[17], events[3], conninfo[CONN_INFO_LEN], *p; p = flags; - if (client->flags & CLIENT_SLAVE) { + if (client->flags & CLIENT_REPLICA) { if (client->flags & CLIENT_MONITOR) *p++ = 'O'; else *p++ = 'S'; } - /* clang-format off */ - if (client->flags & CLIENT_MASTER) *p++ = 'M'; + if (client->flags & CLIENT_PRIMARY) *p++ = 'M'; if (client->flags & CLIENT_PUBSUB) *p++ = 'P'; if (client->flags & CLIENT_MULTI) *p++ = 'x'; if (client->flags & CLIENT_BLOCKED) *p++ = 'b'; @@ -2788,7 +2859,6 @@ sds catClientInfoString(sds s, client *client) { if (client->flags & CLIENT_NO_EVICT) *p++ = 'e'; if (client->flags & CLIENT_NO_TOUCH) *p++ = 'T'; if (p == flags) *p++ = 'N'; - /* clang-format on */ *p++ = '\0'; p = events; @@ -2816,7 +2886,7 @@ sds catClientInfoString(sds s, client *client) { " %s", connGetInfo(client->conn, conninfo, sizeof(conninfo)), " name=%s", client->name ? (char*)client->name->ptr : "", " age=%I", (long long)(commandTimeSnapshot() / 1000 - client->ctime), - " idle=%I", (long long)(server.unixtime - client->lastinteraction), + " idle=%I", (long long)(server.unixtime - client->last_interaction), " flags=%s", flags, " db=%i", client->db->id, " sub=%i", (int) dictSize(client->pubsub_channels), @@ -2824,8 +2894,8 @@ sds catClientInfoString(sds s, client *client) { " ssub=%i", (int) dictSize(client->pubsubshard_channels), " multi=%i", (client->flags & CLIENT_MULTI) ? 
client->mstate.count : -1, " watch=%i", (int) listLength(client->watched_keys), - " qbuf=%U", (unsigned long long) sdslen(client->querybuf), - " qbuf-free=%U", (unsigned long long) sdsavail(client->querybuf), + " qbuf=%U", client->querybuf ? (unsigned long long) sdslen(client->querybuf) : 0, + " qbuf-free=%U", client->querybuf ? (unsigned long long) sdsavail(client->querybuf) : 0, " argv-mem=%U", (unsigned long long) client->argv_len_sum, " multi-mem=%U", (unsigned long long) client->mstate.argv_len_sums, " rbs=%U", (unsigned long long) client->buf_usable_size, @@ -2959,13 +3029,13 @@ void clientSetinfoCommand(client *c) { /* Reset the client state to resemble a newly connected client. */ void resetCommand(client *c) { - /* MONITOR clients are also marked with CLIENT_SLAVE, we need to + /* MONITOR clients are also marked with CLIENT_REPLICA, we need to * distinguish between the two. */ uint64_t flags = c->flags; - if (flags & CLIENT_MONITOR) flags &= ~(CLIENT_MONITOR | CLIENT_SLAVE); + if (flags & CLIENT_MONITOR) flags &= ~(CLIENT_MONITOR | CLIENT_REPLICA); - if (flags & (CLIENT_SLAVE | CLIENT_MASTER | CLIENT_MODULE)) { + if (flags & (CLIENT_REPLICA | CLIENT_PRIMARY | CLIENT_MODULE)) { addReplyError(c, "can only reset normal client connections"); return; } @@ -3197,15 +3267,13 @@ NULL listRewind(server.clients, &li); while ((ln = listNext(&li)) != NULL) { client *client = listNodeValue(ln); - /* clang-format off */ - if (addr && strcmp(getClientPeerId(client),addr) != 0) continue; - if (laddr && strcmp(getClientSockname(client),laddr) != 0) continue; + if (addr && strcmp(getClientPeerId(client), addr) != 0) continue; + if (laddr && strcmp(getClientSockname(client), laddr) != 0) continue; if (type != -1 && getClientType(client) != type) continue; if (id != 0 && client->id != id) continue; if (user && client->user != user) continue; if (c == client && skipme) continue; if (max_age != 0 && (long long)(commandTimeSnapshot() / 1000 - client->ctime) < max_age) continue; - /* clang-format on */ /* Kill it. */ if (c == client) { @@ -3574,7 +3642,7 @@ void helloCommand(client *c) { } /* At this point we need to be authenticated to continue. */ - if (!c->authenticated) { + if (!(c->flags & CLIENT_AUTHENTICATED)) { addReplyError(c, "-NOAUTH HELLO must be called with the client already " "authenticated, otherwise the HELLO AUTH " "option can be used to authenticate the client and " @@ -3611,7 +3679,7 @@ void helloCommand(client *c) { if (!server.sentinel_mode) { addReplyBulkCString(c, "role"); - addReplyBulkCString(c, server.masterhost ? "replica" : "master"); + addReplyBulkCString(c, server.primary_host ? "replica" : "master"); } addReplyBulkCString(c, "modules"); @@ -3758,7 +3826,7 @@ void rewriteClientCommandArgument(client *c, int i, robj *newval) { * the caller wishes. The main usage of this function currently is * enforcing the client output length limits. */ size_t getClientOutputBufferMemoryUsage(client *c) { - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { size_t repl_buf_size = 0; size_t repl_node_num = 0; size_t repl_node_size = sizeof(listNode) + sizeof(replBufBlock); @@ -3780,8 +3848,9 @@ size_t getClientOutputBufferMemoryUsage(client *c) { * the client output buffer memory usage portion of the total. 
*/ size_t getClientMemoryUsage(client *c, size_t *output_buffer_mem_usage) { size_t mem = getClientOutputBufferMemoryUsage(c); + if (output_buffer_mem_usage != NULL) *output_buffer_mem_usage = mem; - mem += sdsZmallocSize(c->querybuf); + mem += c->querybuf ? sdsZmallocSize(c->querybuf) : 0; mem += zmalloc_size(c); mem += c->buf_usable_size; /* For efficiency (less work keeping track of the argv memory), it doesn't include the used memory @@ -3807,15 +3876,15 @@ size_t getClientMemoryUsage(client *c, size_t *output_buffer_mem_usage) { * * The function will return one of the following: * CLIENT_TYPE_NORMAL -> Normal client, including MONITOR - * CLIENT_TYPE_SLAVE -> Slave + * CLIENT_TYPE_REPLICA -> replica * CLIENT_TYPE_PUBSUB -> Client subscribed to Pub/Sub channels - * CLIENT_TYPE_MASTER -> The client representing our replication master. + * CLIENT_TYPE_PRIMARY -> The client representing our replication primary. */ int getClientType(client *c) { - if (c->flags & CLIENT_MASTER) return CLIENT_TYPE_MASTER; + if (c->flags & CLIENT_PRIMARY) return CLIENT_TYPE_PRIMARY; /* Even though MONITOR clients are marked as replicas, we * want the expose them as normal clients. */ - if ((c->flags & CLIENT_SLAVE) && !(c->flags & CLIENT_MONITOR)) return CLIENT_TYPE_SLAVE; + if ((c->flags & CLIENT_REPLICA) && !(c->flags & CLIENT_MONITOR)) return CLIENT_TYPE_REPLICA; if (c->flags & CLIENT_PUBSUB) return CLIENT_TYPE_PUBSUB; return CLIENT_TYPE_NORMAL; } @@ -3824,13 +3893,13 @@ int getClientTypeByName(char *name) { if (!strcasecmp(name, "normal")) return CLIENT_TYPE_NORMAL; else if (!strcasecmp(name, "slave")) - return CLIENT_TYPE_SLAVE; + return CLIENT_TYPE_REPLICA; else if (!strcasecmp(name, "replica")) - return CLIENT_TYPE_SLAVE; + return CLIENT_TYPE_REPLICA; else if (!strcasecmp(name, "pubsub")) return CLIENT_TYPE_PUBSUB; else if (!strcasecmp(name, "master")) - return CLIENT_TYPE_MASTER; + return CLIENT_TYPE_PRIMARY; else return -1; } @@ -3838,9 +3907,9 @@ int getClientTypeByName(char *name) { char *getClientTypeName(int class) { switch (class) { case CLIENT_TYPE_NORMAL: return "normal"; - case CLIENT_TYPE_SLAVE: return "slave"; + case CLIENT_TYPE_REPLICA: return "slave"; case CLIENT_TYPE_PUBSUB: return "pubsub"; - case CLIENT_TYPE_MASTER: return "master"; + case CLIENT_TYPE_PRIMARY: return "master"; default: return NULL; } } @@ -3856,9 +3925,9 @@ int checkClientOutputBufferLimits(client *c) { unsigned long used_mem = getClientOutputBufferMemoryUsage(c); class = getClientType(c); - /* For the purpose of output buffer limiting, masters are handled + /* For the purpose of output buffer limiting, primaries are handled * like normal clients. */ - if (class == CLIENT_TYPE_MASTER) class = CLIENT_TYPE_NORMAL; + if (class == CLIENT_TYPE_PRIMARY) class = CLIENT_TYPE_NORMAL; /* Note that it doesn't make sense to set the replica clients output buffer * limit lower than the repl-backlog-size config (partial sync will succeed @@ -3867,7 +3936,7 @@ int checkClientOutputBufferLimits(client *c) { * This doesn't have memory consumption implications since the replica client * will share the backlog buffers memory. 
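To make the clamp described in the comment above concrete: a replica-class hard limit configured below repl-backlog-size is silently raised to the backlog size, because the replica shares the backlog memory anyway. A tiny standalone restatement with example numbers follows; the helper name is mine, not part of the patch.

#include <stdio.h>

/* Effective hard limit for a replica client's output buffer: a configured
 * hard limit below the replication backlog size is raised to the backlog
 * size, since replicas share the backlog memory anyway. */
static long long effective_replica_hard_limit(long long configured_hard, long long repl_backlog_size) {
    if (configured_hard && configured_hard < repl_backlog_size) return repl_backlog_size;
    return configured_hard;
}

int main(void) {
    long long backlog = 64LL * 1024 * 1024;    /* repl-backlog-size of 64mb */
    long long configured = 32LL * 1024 * 1024; /* replica hard limit set to 32mb */
    printf("effective hard limit: %lld bytes\n", effective_replica_hard_limit(configured, backlog));
    /* Prints 67108864 (64mb): the lower 32mb setting is effectively ignored. */
    return 0;
}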
*/ size_t hard_limit_bytes = server.client_obuf_limits[class].hard_limit_bytes; - if (class == CLIENT_TYPE_SLAVE && hard_limit_bytes && (long long)hard_limit_bytes < server.repl_backlog_size) + if (class == CLIENT_TYPE_REPLICA && hard_limit_bytes && (long long)hard_limit_bytes < server.repl_backlog_size) hard_limit_bytes = server.repl_backlog_size; if (server.client_obuf_limits[class].hard_limit_bytes && used_mem >= hard_limit_bytes) hard = 1; if (server.client_obuf_limits[class].soft_limit_bytes && @@ -3911,7 +3980,7 @@ int closeClientOnOutputBufferLimitReached(client *c, int async) { serverAssert(c->reply_bytes < SIZE_MAX - (1024 * 64)); /* Note that c->reply_bytes is irrelevant for replica clients * (they use the global repl buffers). */ - if ((c->reply_bytes == 0 && getClientType(c) != CLIENT_TYPE_SLAVE) || c->flags & CLIENT_CLOSE_ASAP) return 0; + if ((c->reply_bytes == 0 && getClientType(c) != CLIENT_TYPE_REPLICA) || c->flags & CLIENT_CLOSE_ASAP) return 0; if (checkClientOutputBufferLimits(c)) { sds client = catClientInfoString(sdsempty(), c); @@ -3930,18 +3999,18 @@ int closeClientOnOutputBufferLimitReached(client *c, int async) { return 0; } -/* Helper function used by performEvictions() in order to flush slaves +/* Helper function used by performEvictions() in order to flush replicas * output buffers without returning control to the event loop. * This is also called by SHUTDOWN for a best-effort attempt to send - * slaves the latest writes. */ -void flushSlavesOutputBuffers(void) { + * replicas the latest writes. */ +void flushReplicasOutputBuffers(void) { listIter li; listNode *ln; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = listNodeValue(ln); - int can_receive_writes = connHasWriteHandler(slave->conn) || (slave->flags & CLIENT_PENDING_WRITE); + client *replica = listNodeValue(ln); + int can_receive_writes = connHasWriteHandler(replica->conn) || (replica->flags & CLIENT_PENDING_WRITE); /* We don't want to send the pending data to the replica in a few * cases: @@ -3955,11 +4024,11 @@ void flushSlavesOutputBuffers(void) { * to send data to the replica in this case, please grep for the * flag for this flag. * - * 3. Obviously if the slave is not ONLINE. + * 3. Obviously if the replica is not ONLINE. */ - if (slave->replstate == SLAVE_STATE_ONLINE && !(slave->flags & CLIENT_CLOSE_ASAP) && can_receive_writes && - !slave->repl_start_cmd_stream_on_ack && clientHasPendingReplies(slave)) { - writeToClient(slave, 0); + if (replica->repl_state == REPLICA_STATE_ONLINE && !(replica->flags & CLIENT_CLOSE_ASAP) && + can_receive_writes && !replica->repl_start_cmd_stream_on_ack && clientHasPendingReplies(replica)) { + writeToClient(replica, 0); } } } @@ -4071,7 +4140,7 @@ uint32_t isPausedActionsWithUpdate(uint32_t actions_bitmask) { /* This function is called by the server in order to process a few events from * time to time while blocked into some not interruptible operation. * This allows to reply to clients with the -LOADING error while loading the - * data set at startup or after a full resynchronization with the master + * data set at startup or after a full resynchronization with the primary * and so forth. * * It calls the event loop in order to process a few events. 
Specifically we @@ -4135,14 +4204,14 @@ void processEventsWhileBlocked(void) { #endif typedef struct __attribute__((aligned(CACHE_LINE_SIZE))) threads_pending { - serverAtomic unsigned long value; + _Atomic unsigned long value; } threads_pending; pthread_t io_threads[IO_THREADS_MAX_NUM]; pthread_mutex_t io_threads_mutex[IO_THREADS_MAX_NUM]; threads_pending io_threads_pending[IO_THREADS_MAX_NUM]; int io_threads_op; - /* IO_THREADS_OP_IDLE, IO_THREADS_OP_READ or IO_THREADS_OP_WRITE. */ // TODO: should access to this be atomic??! +/* IO_THREADS_OP_IDLE, IO_THREADS_OP_READ or IO_THREADS_OP_WRITE. */ // TODO: should access to this be atomic??! /* This is the list of clients each thread will serve when threaded I/O is * used. We spawn io_threads_num-1 threads, since one is the main thread @@ -4150,13 +4219,12 @@ int io_threads_op; list *io_threads_list[IO_THREADS_MAX_NUM]; static inline unsigned long getIOPendingCount(int i) { - unsigned long count = 0; - atomicGetWithSync(io_threads_pending[i].value, count); + unsigned long count = atomic_load(&io_threads_pending[i].value); return count; } static inline void setIOPendingCount(int i, unsigned long count) { - atomicSetWithSync(io_threads_pending[i].value, count); + atomic_store(&io_threads_pending[i].value, count); } void *IOThreadMain(void *myid) { @@ -4169,6 +4237,7 @@ void *IOThreadMain(void *myid) { valkey_set_thread_title(thdname); serverSetCpuAffinity(server.server_cpulist); makeThreadKillable(); + initSharedQueryBuf(); while (1) { /* Wait for start */ @@ -4335,7 +4404,7 @@ int handleClientsWithPendingWritesUsingThreads(void) { * buffer, to guarantee data accessing thread safe, we must put all * replicas client into io_threads_list[0] i.e. main thread handles * sending the output buffer of all replicas. */ - if (getClientType(c) == CLIENT_TYPE_SLAVE) { + if (getClientType(c) == CLIENT_TYPE_REPLICA) { listAddNodeTail(io_threads_list[0], c); continue; } @@ -4401,7 +4470,7 @@ int handleClientsWithPendingWritesUsingThreads(void) { * pending read clients and flagged as such. 
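A note on the threads_pending change above: the per-I/O-thread pending counters now use a plain C11 _Atomic field, and each counter is padded out to its own cache line so a thread polling its slot does not false-share with its neighbours. A minimal sketch of the same layout follows; the CACHE_LINE_SIZE value and thread count are assumed here for the example, not taken from the patch.

#include <stdatomic.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 64     /* assumed for this sketch */
#define IO_THREADS_MAX 16      /* assumed for this sketch */

/* One atomic counter per I/O thread, padded to a full cache line so that a
 * thread spinning on its own slot does not invalidate its neighbours' lines. */
typedef struct __attribute__((aligned(CACHE_LINE_SIZE))) pending_s {
    _Atomic unsigned long value;
} pending_t;

static pending_t io_pending[IO_THREADS_MAX];

static unsigned long get_pending(int i) {
    return atomic_load(&io_pending[i].value);   /* seq_cst by default */
}

static void set_pending(int i, unsigned long n) {
    atomic_store(&io_pending[i].value, n);      /* seq_cst by default */
}

int main(void) {
    set_pending(3, 42);  /* main thread hands 42 clients to I/O thread 3 */
    printf("thread 3 pending: %lu\n", get_pending(3));
    printf("sizeof(pending_t): %zu bytes\n", sizeof(pending_t));  /* one cache line */
    return 0;
}

The plain atomic_load()/atomic_store() calls default to sequentially consistent ordering, matching the stricter atomicGetWithSync()/atomicSetWithSync() calls they replace.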
*/ int postponeClientRead(client *c) { if (server.io_threads_active && server.io_threads_do_reads && !ProcessingEventsWhileBlocked && - !(c->flags & (CLIENT_MASTER | CLIENT_SLAVE | CLIENT_BLOCKED)) && io_threads_op == IO_THREADS_OP_IDLE) { + !(c->flags & (CLIENT_PRIMARY | CLIENT_REPLICA | CLIENT_BLOCKED)) && io_threads_op == IO_THREADS_OP_IDLE) { listAddNodeHead(server.clients_pending_read, c); c->pending_read_list_node = listFirst(server.clients_pending_read); return 1; diff --git a/src/notify.c b/src/notify.c index 72018908fc..1cbf9c74ed 100644 --- a/src/notify.c +++ b/src/notify.c @@ -42,8 +42,7 @@ int keyspaceEventsStringToFlags(char *classes) { int c, flags = 0; while ((c = *p++) != '\0') { - /* clang-format off */ - switch(c) { + switch (c) { case 'A': flags |= NOTIFY_ALL; break; case 'g': flags |= NOTIFY_GENERIC; break; case '$': flags |= NOTIFY_STRING; break; @@ -61,7 +60,6 @@ int keyspaceEventsStringToFlags(char *classes) { case 'n': flags |= NOTIFY_NEW; break; default: return -1; } - /* clang-format on */ } return flags; } @@ -73,28 +71,26 @@ int keyspaceEventsStringToFlags(char *classes) { sds keyspaceEventsFlagsToString(int flags) { sds res; - /* clang-format off */ res = sdsempty(); if ((flags & NOTIFY_ALL) == NOTIFY_ALL) { - res = sdscatlen(res,"A",1); + res = sdscatlen(res, "A", 1); } else { - if (flags & NOTIFY_GENERIC) res = sdscatlen(res,"g",1); - if (flags & NOTIFY_STRING) res = sdscatlen(res,"$",1); - if (flags & NOTIFY_LIST) res = sdscatlen(res,"l",1); - if (flags & NOTIFY_SET) res = sdscatlen(res,"s",1); - if (flags & NOTIFY_HASH) res = sdscatlen(res,"h",1); - if (flags & NOTIFY_ZSET) res = sdscatlen(res,"z",1); - if (flags & NOTIFY_EXPIRED) res = sdscatlen(res,"x",1); - if (flags & NOTIFY_EVICTED) res = sdscatlen(res,"e",1); - if (flags & NOTIFY_STREAM) res = sdscatlen(res,"t",1); - if (flags & NOTIFY_MODULE) res = sdscatlen(res,"d",1); - if (flags & NOTIFY_NEW) res = sdscatlen(res,"n",1); + if (flags & NOTIFY_GENERIC) res = sdscatlen(res, "g", 1); + if (flags & NOTIFY_STRING) res = sdscatlen(res, "$", 1); + if (flags & NOTIFY_LIST) res = sdscatlen(res, "l", 1); + if (flags & NOTIFY_SET) res = sdscatlen(res, "s", 1); + if (flags & NOTIFY_HASH) res = sdscatlen(res, "h", 1); + if (flags & NOTIFY_ZSET) res = sdscatlen(res, "z", 1); + if (flags & NOTIFY_EXPIRED) res = sdscatlen(res, "x", 1); + if (flags & NOTIFY_EVICTED) res = sdscatlen(res, "e", 1); + if (flags & NOTIFY_STREAM) res = sdscatlen(res, "t", 1); + if (flags & NOTIFY_MODULE) res = sdscatlen(res, "d", 1); + if (flags & NOTIFY_NEW) res = sdscatlen(res, "n", 1); } - if (flags & NOTIFY_KEYSPACE) res = sdscatlen(res,"K",1); - if (flags & NOTIFY_KEYEVENT) res = sdscatlen(res,"E",1); - if (flags & NOTIFY_KEY_MISS) res = sdscatlen(res,"m",1); + if (flags & NOTIFY_KEYSPACE) res = sdscatlen(res, "K", 1); + if (flags & NOTIFY_KEYEVENT) res = sdscatlen(res, "E", 1); + if (flags & NOTIFY_KEY_MISS) res = sdscatlen(res, "m", 1); return res; - /* clang-format on */ } /* The API provided to the rest of the serer core is a simple function: diff --git a/src/object.c b/src/object.c index 1a335edd6d..7f93c3768d 100644 --- a/src/object.c +++ b/src/object.c @@ -372,8 +372,7 @@ void incrRefCount(robj *o) { void decrRefCount(robj *o) { if (o->refcount == 1) { - /* clang-format off */ - switch(o->type) { + switch (o->type) { case OBJ_STRING: freeStringObject(o); break; case OBJ_LIST: freeListObject(o); break; case OBJ_SET: freeSetObject(o); break; @@ -383,7 +382,6 @@ void decrRefCount(robj *o) { case OBJ_STREAM: freeStreamObject(o); 
break; default: serverPanic("Unknown object type"); break; } - /* clang-format on */ zfree(o); } else { if (o->refcount <= 0) serverPanic("decrRefCount against refcount <= 0"); @@ -552,8 +550,7 @@ void dismissObject(robj *o, size_t size_hint) { * so we avoid these pointless loops when they're not going to do anything. */ #if defined(USE_JEMALLOC) && defined(__linux__) if (o->refcount != 1) return; - /* clang-format off */ - switch(o->type) { + switch (o->type) { case OBJ_STRING: dismissStringObject(o); break; case OBJ_LIST: dismissListObject(o, size_hint); break; case OBJ_SET: dismissSetObject(o, size_hint); break; @@ -562,7 +559,6 @@ void dismissObject(robj *o, size_t size_hint) { case OBJ_STREAM: dismissStreamObject(o, size_hint); break; default: break; } - /* clang-format on */ #else UNUSED(o); UNUSED(size_hint); @@ -647,8 +643,7 @@ robj *tryObjectEncodingEx(robj *o, int try_trim) { * Note that we avoid using shared integers when maxmemory is used * because every object needs to have a private LRU field for the LRU * algorithm to work well. */ - if ((server.maxmemory == 0 || !(server.maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS)) && value >= 0 && - value < OBJ_SHARED_INTEGERS) { + if (canUseSharedObject() && value >= 0 && value < OBJ_SHARED_INTEGERS) { decrRefCount(o); return shared.integers[value]; } else { @@ -931,8 +926,7 @@ int getIntFromObjectOrReply(client *c, robj *o, int *target, const char *msg) { } char *strEncoding(int encoding) { - /* clang-format off */ - switch(encoding) { + switch (encoding) { case OBJ_ENCODING_RAW: return "raw"; case OBJ_ENCODING_INT: return "int"; case OBJ_ENCODING_HT: return "hashtable"; @@ -944,7 +938,6 @@ char *strEncoding(int encoding) { case OBJ_ENCODING_STREAM: return "stream"; default: return "unknown"; } - /* clang-format on */ } /* =========================== Memory introspection ========================= */ @@ -1174,11 +1167,11 @@ struct serverMemOverhead *getMemoryOverheadData(void) { * only if replication buffer memory is more than the repl backlog setting, * we consider the excess as replicas' memory. Otherwise, replication buffer * memory is the consumption of repl backlog. */ - if (listLength(server.slaves) && (long long)server.repl_buffer_mem > server.repl_backlog_size) { - mh->clients_slaves = server.repl_buffer_mem - server.repl_backlog_size; + if (listLength(server.replicas) && (long long)server.repl_buffer_mem > server.repl_backlog_size) { + mh->clients_replicas = server.repl_buffer_mem - server.repl_backlog_size; mh->repl_backlog = server.repl_backlog_size; } else { - mh->clients_slaves = 0; + mh->clients_replicas = 0; mh->repl_backlog = server.repl_buffer_mem; } if (server.repl_backlog) { @@ -1187,12 +1180,12 @@ struct serverMemOverhead *getMemoryOverheadData(void) { raxSize(server.repl_backlog->blocks_index) * sizeof(void *); } mem_total += mh->repl_backlog; - mem_total += mh->clients_slaves; + mem_total += mh->clients_replicas; /* Computing the memory used by the clients would be O(N) if done * here online. We use our values computed incrementally by * updateClientMemoryUsage(). */ - mh->clients_normal = server.stat_clients_type_memory[CLIENT_TYPE_MASTER] + + mh->clients_normal = server.stat_clients_type_memory[CLIENT_TYPE_PRIMARY] + server.stat_clients_type_memory[CLIENT_TYPE_PUBSUB] + server.stat_clients_type_memory[CLIENT_TYPE_NORMAL]; mem_total += mh->clients_normal; @@ -1272,7 +1265,7 @@ sds getMemoryDoctorReport(void) { int high_alloc_frag = 0; /* High allocator fragmentation. 
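On the memory accounting above: the shared replication buffer is attributed to the backlog first, and only the excess over repl-backlog-size is reported as replica-client memory (still exposed under the clients.slaves key for compatibility). A short worked example of that split; the numbers are made up.

#include <stdio.h>

/* Attribute the shared replication buffer between the backlog and replica
 * clients, mirroring the accounting above: the backlog is charged first and
 * only the excess is reported as replica-client memory. */
static void split_repl_buffer_mem(long long repl_buffer_mem, long long repl_backlog_size,
                                  long num_replicas, long long *clients_replicas, long long *repl_backlog) {
    if (num_replicas > 0 && repl_buffer_mem > repl_backlog_size) {
        *clients_replicas = repl_buffer_mem - repl_backlog_size;
        *repl_backlog = repl_backlog_size;
    } else {
        *clients_replicas = 0;
        *repl_backlog = repl_buffer_mem;
    }
}

int main(void) {
    long long clients_replicas, repl_backlog;
    /* 12mb of replication buffer, an 8mb backlog and one attached replica:
     * 8mb is charged to the backlog, the 4mb excess to replica clients. */
    split_repl_buffer_mem(12LL << 20, 8LL << 20, 1, &clients_replicas, &repl_backlog);
    printf("clients.slaves=%lld repl.backlog=%lld\n", clients_replicas, repl_backlog);
    return 0;
}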
*/ int high_proc_rss = 0; /* High process rss overhead. */ int high_alloc_rss = 0; /* High rss overhead. */ - int big_slave_buf = 0; /* Slave buffers are too big. */ + int big_replica_buf = 0; /* Replica buffers are too big. */ int big_client_buf = 0; /* Client buffers are too big. */ int many_scripts = 0; /* Script cache has too many scripts. */ int num_reports = 0; @@ -1313,16 +1306,16 @@ sds getMemoryDoctorReport(void) { } /* Clients using more than 200k each average? */ - long numslaves = listLength(server.slaves); - long numclients = listLength(server.clients) - numslaves; + long num_replicas = listLength(server.replicas); + long numclients = listLength(server.clients) - num_replicas; if (mh->clients_normal / numclients > (1024 * 200)) { big_client_buf = 1; num_reports++; } - /* Slaves using more than 10 MB each? */ - if (numslaves > 0 && mh->clients_slaves > (1024 * 1024 * 10)) { - big_slave_buf = 1; + /* Replicas using more than 10 MB each? */ + if (num_replicas > 0 && mh->clients_replicas > (1024 * 1024 * 10)) { + big_replica_buf = 1; num_reports++; } @@ -1387,7 +1380,7 @@ sds getMemoryDoctorReport(void) { "1.1 (this means that the Resident Set Size of the Valkey process is much larger than the RSS the " "allocator holds). This problem may be due to Lua scripts or Modules.\n\n"); } - if (big_slave_buf) { + if (big_replica_buf) { s = sdscat(s, " * Big replica buffers: The replica output buffers in this instance are greater than 10MB for " "each replica (on average). This likely means that there is some replica instance that is " @@ -1580,7 +1573,7 @@ NULL addReplyLongLong(c, mh->repl_backlog); addReplyBulkCString(c, "clients.slaves"); - addReplyLongLong(c, mh->clients_slaves); + addReplyLongLong(c, mh->clients_replicas); addReplyBulkCString(c, "clients.normal"); addReplyLongLong(c, mh->clients_normal); diff --git a/src/rdb.c b/src/rdb.c index 5e398dee74..ad7da17ea1 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -36,9 +36,11 @@ #include "functions.h" #include "intset.h" /* Compact integer set structure */ #include "bio.h" +#include "zmalloc.h" #include #include +#include #include #include #include @@ -106,6 +108,30 @@ void rdbReportError(int corruption_error, int linenum, char *reason, ...) { exit(1); } +typedef struct { + rdbAuxFieldEncoder encoder; + rdbAuxFieldDecoder decoder; +} rdbAuxFieldCodec; + +dictType rdbAuxFieldDictType = { + dictSdsCaseHash, /* hash function */ + NULL, /* key dup */ + dictSdsKeyCaseCompare, /* key compare */ + dictSdsDestructor, /* key destructor */ + dictVanillaFree, /* val destructor */ + NULL /* allow to expand */ +}; + +dict *rdbAuxFields = NULL; + +int rdbRegisterAuxField(char *auxfield, rdbAuxFieldEncoder encoder, rdbAuxFieldDecoder decoder) { + if (rdbAuxFields == NULL) rdbAuxFields = dictCreate(&rdbAuxFieldDictType); + rdbAuxFieldCodec *codec = zmalloc(sizeof(rdbAuxFieldCodec)); + codec->encoder = encoder; + codec->decoder = decoder; + return dictAdd(rdbAuxFields, sdsnew(auxfield), (void *)codec) == DICT_OK ? 
C_OK : C_ERR; +} + ssize_t rdbWriteRaw(rio *rdb, void *p, size_t len) { if (rdb && rioWrite(rdb, p, len) == 0) return -1; return len; @@ -1182,9 +1208,27 @@ int rdbSaveInfoAuxFields(rio *rdb, int rdbflags, rdbSaveInfo *rsi) { if (rsi) { if (rdbSaveAuxFieldStrInt(rdb, "repl-stream-db", rsi->repl_stream_db) == -1) return -1; if (rdbSaveAuxFieldStrStr(rdb, "repl-id", server.replid) == -1) return -1; - if (rdbSaveAuxFieldStrInt(rdb, "repl-offset", server.master_repl_offset) == -1) return -1; + if (rdbSaveAuxFieldStrInt(rdb, "repl-offset", server.primary_repl_offset) == -1) return -1; } if (rdbSaveAuxFieldStrInt(rdb, "aof-base", aof_base) == -1) return -1; + + /* Handle additional dynamic aux fields */ + if (rdbAuxFields != NULL) { + dictIterator di; + dictInitIterator(&di, rdbAuxFields); + dictEntry *de; + while ((de = dictNext(&di)) != NULL) { + rdbAuxFieldCodec *codec = (rdbAuxFieldCodec *)dictGetVal(de); + sds s = codec->encoder(rdbflags); + if (s == NULL) continue; + if (rdbSaveAuxFieldStrStr(rdb, dictGetKey(de), s) == -1) { + sdsfree(s); + return -1; + } + sdsfree(s); + } + } + return 1; } @@ -1368,19 +1412,19 @@ int rdbSaveRio(int req, rio *rdb, int *error, int rdbflags, rdbSaveInfo *rsi) { snprintf(magic, sizeof(magic), "REDIS%04d", RDB_VERSION); if (rdbWriteRaw(rdb, magic, 9) == -1) goto werr; if (rdbSaveInfoAuxFields(rdb, rdbflags, rsi) == -1) goto werr; - if (!(req & SLAVE_REQ_RDB_EXCLUDE_DATA) && rdbSaveModulesAux(rdb, VALKEYMODULE_AUX_BEFORE_RDB) == -1) goto werr; + if (!(req & REPLICA_REQ_RDB_EXCLUDE_DATA) && rdbSaveModulesAux(rdb, VALKEYMODULE_AUX_BEFORE_RDB) == -1) goto werr; /* save functions */ - if (!(req & SLAVE_REQ_RDB_EXCLUDE_FUNCTIONS) && rdbSaveFunctions(rdb) == -1) goto werr; + if (!(req & REPLICA_REQ_RDB_EXCLUDE_FUNCTIONS) && rdbSaveFunctions(rdb) == -1) goto werr; /* save all databases, skip this if we're in functions-only mode */ - if (!(req & SLAVE_REQ_RDB_EXCLUDE_DATA)) { + if (!(req & REPLICA_REQ_RDB_EXCLUDE_DATA)) { for (j = 0; j < server.dbnum; j++) { if (rdbSaveDb(rdb, j, rdbflags, &key_counter) == -1) goto werr; } } - if (!(req & SLAVE_REQ_RDB_EXCLUDE_DATA) && rdbSaveModulesAux(rdb, VALKEYMODULE_AUX_AFTER_RDB) == -1) goto werr; + if (!(req & REPLICA_REQ_RDB_EXCLUDE_DATA) && rdbSaveModulesAux(rdb, VALKEYMODULE_AUX_AFTER_RDB) == -1) goto werr; /* EOF opcode */ if (rdbSaveType(rdb, RDB_OPCODE_EOF) == -1) goto werr; @@ -1494,7 +1538,7 @@ static int rdbSaveInternal(int req, const char *filename, rdbSaveInfo *rsi, int int rdbSaveToFile(const char *filename) { startSaving(RDBFLAGS_NONE); - if (rdbSaveInternal(SLAVE_REQ_NONE, filename, NULL, RDBFLAGS_NONE) != C_OK) { + if (rdbSaveInternal(REPLICA_REQ_NONE, filename, NULL, RDBFLAGS_NONE) != C_OK) { int saved_errno = errno; stopSaving(0); errno = saved_errno; @@ -1815,8 +1859,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { int deep_integrity_validation = server.sanitize_dump_payload == SANITIZE_DUMP_YES; if (server.sanitize_dump_payload == SANITIZE_DUMP_CLIENTS) { /* Skip sanitization when loading (an RDB), or getting a RESTORE command - * from either the master or a client using an ACL user with the skip-sanitize-payload flag. */ - int skip = server.loading || (server.current_client && (server.current_client->flags & CLIENT_MASTER)); + * from either the primary or a client using an ACL user with the skip-sanitize-payload flag. 
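The rdbRegisterAuxField() hook added above lets a subsystem attach its own AUX key to the RDB header: the encoder runs while rdbSaveInfoAuxFields() writes the header and may return NULL to skip the field, and the decoder runs when that key is encountered during load, with a negative return aborting the load. The sketch below is a hedged usage example only: the field name and callbacks are hypothetical, the exact rdbAuxFieldEncoder/rdbAuxFieldDecoder typedefs live in the header and are merely inferred here from the call sites, and the code assumes it sits in a server source file that already includes server.h and rdb.h.

/* Hypothetical subsystem registering its own RDB AUX field. */
static sds myFeatureEncode(int rdbflags) {
    UNUSED(rdbflags);
    /* Returning NULL would skip the field for this save. */
    return sdsnew("enabled");
}

static int myFeatureDecode(int rdbflags, sds value) {
    UNUSED(rdbflags);
    serverLog(LL_NOTICE, "my-feature state loaded from RDB: %s", value);
    return 0; /* a negative return aborts the load */
}

void registerMyFeatureAuxField(void) {
    if (rdbRegisterAuxField("my-feature-state", myFeatureEncode, myFeatureDecode) != C_OK) {
        serverLog(LL_WARNING, "AUX field 'my-feature-state' was already registered");
    }
}

Registration returns C_ERR if the same key is registered twice, since it is backed by a dictAdd() on a case-insensitive dict.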
*/ + int skip = server.loading || (server.current_client && (server.current_client->flags & CLIENT_PRIMARY)); if (!skip && server.current_client && server.current_client->user) skip = !!(server.current_client->user->flags & USER_FLAG_SANITIZE_PAYLOAD_SKIP); deep_integrity_validation = !skip; @@ -2433,12 +2477,12 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { } while (listpacks--) { - /* Get the master ID, the one we'll use as key of the radix tree + /* Get the primary ID, the one we'll use as key of the radix tree * node: the entries inside the listpack itself are delta-encoded * relatively to this ID. */ sds nodekey = rdbGenericLoadStringObject(rdb, RDB_LOAD_SDS, NULL); if (nodekey == NULL) { - rdbReportReadError("Stream master ID loading failed: invalid encoding or I/O error."); + rdbReportReadError("Stream primary ID loading failed: invalid encoding or I/O error."); decrRefCount(o); return NULL; } @@ -2882,13 +2926,13 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) { if (server.loading_process_events_interval_bytes && (r->processed_bytes + len) / server.loading_process_events_interval_bytes > r->processed_bytes / server.loading_process_events_interval_bytes) { - if (server.masterhost && server.repl_state == REPL_STATE_TRANSFER) replicationSendNewlineToMaster(); + if (server.primary_host && server.repl_state == REPL_STATE_TRANSFER) replicationSendNewlineToPrimary(); loadingAbsProgress(r->processed_bytes); processEventsWhileBlocked(); processModuleLoadingProgressEvent(0); } if (server.repl_state == REPL_STATE_TRANSFER && rioCheckType(r) == RIO_TYPE_CONN) { - atomicIncr(server.stat_net_repl_input_bytes, len); + atomic_fetch_add_explicit(&server.stat_net_repl_input_bytes, len, memory_order_relaxed); } } @@ -3020,10 +3064,9 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin if ((dbid = rdbLoadLen(rdb, NULL)) == RDB_LENERR) goto eoferr; if (dbid >= (unsigned)server.dbnum) { serverLog(LL_WARNING, - "FATAL: Data file was created with a Redis " - "server configured to handle more than %d " - "databases. Exiting\n", - server.dbnum); + "FATAL: Data file was created with a %s server configured to handle " + "more than %d databases. Exiting\n", + SERVER_TITLE, server.dbnum); exit(1); } db = rdb_loading_ctx->dbarray + dbid; @@ -3081,7 +3124,7 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin } else if (!strcasecmp(auxkey->ptr, "redis-ver")) { serverLog(LL_NOTICE, "Loading RDB produced by Redis version %s", (char *)auxval->ptr); } else if (!strcasecmp(auxkey->ptr, "valkey-ver")) { - serverLog(LL_NOTICE, "Loading RDB produced by valkey version %s", (char *)auxval->ptr); + serverLog(LL_NOTICE, "Loading RDB produced by Valkey version %s", (char *)auxval->ptr); } else if (!strcasecmp(auxkey->ptr, "ctime")) { time_t age = time(NULL) - strtol(auxval->ptr, NULL, 10); if (age < 0) age = 0; @@ -3099,9 +3142,22 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin } else if (!strcasecmp(auxkey->ptr, "redis-bits")) { /* Just ignored. */ } else { - /* We ignore fields we don't understand, as by AUX field - * contract. 
*/ - serverLog(LL_DEBUG, "Unrecognized RDB AUX field: '%s'", (char *)auxkey->ptr); + /* Check if this is a dynamic aux field */ + int handled = 0; + if (rdbAuxFields != NULL) { + dictEntry *de = dictFind(rdbAuxFields, auxkey->ptr); + if (de != NULL) { + handled = 1; + rdbAuxFieldCodec *codec = (rdbAuxFieldCodec *)dictGetVal(de); + if (codec->decoder(rdbflags, auxval->ptr) < 0) goto eoferr; + } + } + + if (!handled) { + /* We ignore fields we don't understand, as by AUX field + * contract. */ + serverLog(LL_DEBUG, "Unrecognized RDB AUX field: '%s'", (char *)auxkey->ptr); + } } decrRefCount(auxkey); @@ -3197,9 +3253,9 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin /* Check if the key already expired. This function is used when loading * an RDB file from disk, either at startup, or when an RDB was - * received from the master. In the latter case, the master is + * received from the primary. In the latter case, the primary is * responsible for key expiry. If we would expire keys here, the - * snapshot taken by the master may not be reflected on the slave. + * snapshot taken by the primary may not be reflected on the replica. * Similarly, if the base AOF is RDB format, we want to load all * the keys they are, since the log of operations in the incr AOF * is assumed to work in the exact keyspace state. */ @@ -3215,18 +3271,18 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin sdsfree(key); goto eoferr; } - } else if (iAmMaster() && !(rdbflags & RDBFLAGS_AOF_PREAMBLE) && expiretime != -1 && expiretime < now) { + } else if (iAmPrimary() && !(rdbflags & RDBFLAGS_AOF_PREAMBLE) && expiretime != -1 && expiretime < now) { if (rdbflags & RDBFLAGS_FEED_REPL) { /* Caller should have created replication backlog, * and now this path only works when rebooting, * so we don't have replicas yet. */ - serverAssert(server.repl_backlog != NULL && listLength(server.slaves) == 0); + serverAssert(server.repl_backlog != NULL && listLength(server.replicas) == 0); robj keyobj; initStaticStringObject(keyobj, key); robj *argv[2]; argv[0] = server.lazyfree_lazy_expire ? shared.unlink : shared.del; argv[1] = &keyobj; - replicationFeedSlaves(dbid, argv, 2); + replicationFeedReplicas(dbid, argv, 2); } sdsfree(key); decrRefCount(val); @@ -3378,7 +3434,7 @@ static void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal, time_t sav } /* A background saving child (BGSAVE) terminated its work. Handle this. - * This function covers the case of RDB -> Slaves socket transfers for + * This function covers the case of RDB -> Replicas socket transfers for * diskless replication. */ static void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { if (!bysignal && exitcode == 0) { @@ -3416,9 +3472,9 @@ void backgroundSaveDoneHandler(int exitcode, int bysignal) { server.rdb_child_type = RDB_CHILD_TYPE_NONE; server.rdb_save_time_last = save_end - server.rdb_save_time_start; server.rdb_save_time_start = -1; - /* Possibly there are slaves waiting for a BGSAVE in order to be served + /* Possibly there are replicas waiting for a BGSAVE in order to be served * (the first stage of SYNC is a bulk transfer of dump.rdb) */ - updateSlavesWaitingBgsave((!bysignal && exitcode == 0) ? C_OK : C_ERR, type); + updateReplicasWaitingBgsave((!bysignal && exitcode == 0) ? 
C_OK : C_ERR, type); } /* Kill the RDB saving child using SIGUSR1 (so that the parent will know @@ -3434,9 +3490,9 @@ void killRDBChild(void) { * - rdbRemoveTempFile */ } -/* Spawn an RDB child that writes the RDB to the sockets of the slaves - * that are currently in SLAVE_STATE_WAIT_BGSAVE_START state. */ -int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi) { +/* Spawn an RDB child that writes the RDB to the sockets of the replicas + * that are currently in REPLICA_STATE_WAIT_BGSAVE_START state. */ +int rdbSaveToReplicasSockets(int req, rdbSaveInfo *rsi) { listNode *ln; listIter li; pid_t childpid; @@ -3468,17 +3524,17 @@ int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi) { /* Collect the connections of the replicas we want to transfer * the RDB to, which are i WAIT_BGSAVE_START state. */ - server.rdb_pipe_conns = zmalloc(sizeof(connection *) * listLength(server.slaves)); + server.rdb_pipe_conns = zmalloc(sizeof(connection *) * listLength(server.replicas)); server.rdb_pipe_numconns = 0; server.rdb_pipe_numconns_writing = 0; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - /* Check slave has the exact requirements */ - if (slave->slave_req != req) continue; - server.rdb_pipe_conns[server.rdb_pipe_numconns++] = slave->conn; - replicationSetupSlaveForFullResync(slave, getPsyncInitialOffset()); + client *replica = ln->value; + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_START) { + /* Check replica has the exact requirements */ + if (replica->replica_req != req) continue; + server.rdb_pipe_conns[server.rdb_pipe_numconns++] = replica->conn; + replicationSetupReplicaForFullResync(replica, getPsyncInitialOffset()); } } @@ -3522,13 +3578,13 @@ int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi) { serverLog(LL_WARNING, "Can't save in background: fork: %s", strerror(errno)); /* Undo the state change. 
The caller will perform cleanup on - * all the slaves in BGSAVE_START state, but an early call to - * replicationSetupSlaveForFullResync() turned it into BGSAVE_END */ - listRewind(server.slaves, &li); + * all the replicas in BGSAVE_START state, but an early call to + * replicationSetupReplicaForFullResync() turned it into BGSAVE_END */ + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) { - slave->replstate = SLAVE_STATE_WAIT_BGSAVE_START; + client *replica = ln->value; + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_END) { + replica->repl_state = REPLICA_STATE_WAIT_BGSAVE_START; } } close(rdb_pipe_write); @@ -3563,7 +3619,7 @@ void saveCommand(client *c) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - if (rdbSave(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) == C_OK) { + if (rdbSave(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) == C_OK) { addReply(c, shared.ok); } else { addReplyErrorObject(c, shared.err); @@ -3599,7 +3655,7 @@ void bgsaveCommand(client *c) { "Use BGSAVE SCHEDULE in order to schedule a BGSAVE whenever " "possible."); } - } else if (rdbSaveBackground(SLAVE_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) == C_OK) { + } else if (rdbSaveBackground(REPLICA_REQ_NONE, server.rdb_filename, rsiptr, RDBFLAGS_NONE) == C_OK) { addReplyStatus(c, "Background saving started"); } else { addReplyErrorObject(c, shared.err); @@ -3608,48 +3664,48 @@ void bgsaveCommand(client *c) { /* Populate the rdbSaveInfo structure used to persist the replication * information inside the RDB file. Currently the structure explicitly - * contains just the currently selected DB from the master stream, however + * contains just the currently selected DB from the primary stream, however * if the rdbSave*() family functions receive a NULL rsi structure also * the Replication ID/offset is not saved. The function populates 'rsi' * that is normally stack-allocated in the caller, returns the populated - * pointer if the instance has a valid master client, otherwise NULL + * pointer if the instance has a valid primary client, otherwise NULL * is returned, and the RDB saving will not persist any replication related * information. */ rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi) { rdbSaveInfo rsi_init = RDB_SAVE_INFO_INIT; *rsi = rsi_init; - /* If the instance is a master, we can populate the replication info + /* If the instance is a primary, we can populate the replication info * only when repl_backlog is not NULL. If the repl_backlog is NULL, * it means that the instance isn't in any replication chains. In this - * scenario the replication info is useless, because when a slave + * scenario the replication info is useless, because when a replica * connects to us, the NULL repl_backlog will trigger a full * synchronization, at the same time we will use a new replid and clear * replid2. */ - if (!server.masterhost && server.repl_backlog) { - /* Note that when server.slaveseldb is -1, it means that this master + if (!server.primary_host && server.repl_backlog) { + /* Note that when server.replicas_eldb is -1, it means that this primary * didn't apply any write commands after a full synchronization. - * So we can let repl_stream_db be 0, this allows a restarted slave + * So we can let repl_stream_db be 0, this allows a restarted replica * to reload replication ID/offset, it's safe because the next write * command must generate a SELECT statement. 
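/* Illustration (annotation, not part of the patch): the source-of-truth
 * selection rdbPopulateSaveInfo() performs in the surrounding hunk, reduced
 * to a pure decision function. The three cases mirror the code: a primary
 * with a backlog uses replicas_eldb, a replica with a live primary link uses
 * the primary client's selected DB, and a disconnected replica falls back to
 * the cached primary; otherwise no replication info is persisted. The struct
 * below is a stand-in for the relevant server fields, not real server state. */
#include <stdio.h>

typedef struct {
    int is_primary;        /* primary_host == NULL */
    int has_backlog;       /* repl_backlog != NULL */
    int replicas_eldb;     /* last SELECTed db sent to replicas, -1 if none */
    int primary_db;        /* server.primary ? server.primary->db->id : -1 */
    int cached_primary_db; /* server.cached_primary ? ... : -1 */
} repl_view;

/* Returns the repl_stream_db to store in the RDB, or -1 when the rsi pointer
 * would be NULL and no replication info should be saved at all. */
static int pick_repl_stream_db(const repl_view *v) {
    if (v->is_primary && v->has_backlog) return v->replicas_eldb == -1 ? 0 : v->replicas_eldb;
    if (v->primary_db != -1) return v->primary_db;
    if (v->cached_primary_db != -1) return v->cached_primary_db;
    return -1;
}

int main(void) {
    repl_view primary = {1, 1, -1, -1, -1}; /* fresh primary: db 0 */
    repl_view replica = {0, 0, -1, 5, -1};  /* connected replica: primary's db */
    repl_view orphan = {0, 0, -1, -1, -1};  /* nothing to persist */
    printf("%d %d %d\n", pick_repl_stream_db(&primary), pick_repl_stream_db(&replica),
           pick_repl_stream_db(&orphan));
    return 0;
}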
*/ - rsi->repl_stream_db = server.slaveseldb == -1 ? 0 : server.slaveseldb; + rsi->repl_stream_db = server.replicas_eldb == -1 ? 0 : server.replicas_eldb; return rsi; } - /* If the instance is a slave we need a connected master + /* If the instance is a replica we need a connected primary * in order to fetch the currently selected DB. */ - if (server.master) { - rsi->repl_stream_db = server.master->db->id; + if (server.primary) { + rsi->repl_stream_db = server.primary->db->id; return rsi; } - /* If we have a cached master we can use it in order to populate the - * replication selected DB info inside the RDB file: the slave can - * increment the master_repl_offset only from data arriving from the - * master, so if we are disconnected the offset in the cached master + /* If we have a cached primary we can use it in order to populate the + * replication selected DB info inside the RDB file: the replica can + * increment the primary_repl_offset only from data arriving from the + * primary, so if we are disconnected the offset in the cached primary * is valid. */ - if (server.cached_master) { - rsi->repl_stream_db = server.cached_master->db->id; + if (server.cached_primary) { + rsi->repl_stream_db = server.cached_primary->db->id; return rsi; } return NULL; diff --git a/src/rdb.h b/src/rdb.h index 48762e10e1..393d2f658a 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -154,7 +154,7 @@ int rdbSaveObjectType(rio *rdb, robj *o); int rdbLoadObjectType(rio *rdb); int rdbLoad(char *filename, rdbSaveInfo *rsi, int rdbflags); int rdbSaveBackground(int req, char *filename, rdbSaveInfo *rsi, int rdbflags); -int rdbSaveToSlavesSockets(int req, rdbSaveInfo *rsi); +int rdbSaveToReplicasSockets(int req, rdbSaveInfo *rsi); void rdbRemoveTempFile(pid_t childpid, int from_signal); int rdbSaveToFile(const char *filename); int rdbSave(int req, char *filename, rdbSaveInfo *rsi, int rdbflags); diff --git a/src/replication.c b/src/replication.c index a3268f41db..4fe8470371 100644 --- a/src/replication.c +++ b/src/replication.c @@ -39,14 +39,15 @@ #include #include #include +#include #include #include void replicationDiscardCachedMaster(void); void replicationResurrectCachedMaster(connection *conn); void replicationSendAck(void); -int replicaPutOnline(client *slave); -void replicaStartCommandStream(client *slave); +int replicaPutOnline(client *replica); +void replicaStartCommandStream(client *replica); int cancelReplicationHandshake(int reconnect); /* We take a global flag to remember if this instance generated an RDB @@ -63,20 +64,20 @@ static ConnectionType *connTypeOfReplication(void) { return connectionTypeTcp(); } -/* Return the pointer to a string representing the slave ip:listening_port - * pair. Mostly useful for logging, since we want to log a slave using its +/* Return the pointer to a string representing the replica ip:listening_port + * pair. Mostly useful for logging, since we want to log a replica using its * IP address and its listening port which is more clear for the user, for * example: "Closing connection with replica 10.1.2.3:6380". */ -char *replicationGetSlaveName(client *c) { +char *replicationGetReplicaName(client *c) { static char buf[NET_HOST_PORT_STR_LEN]; char ip[NET_IP_STR_LEN]; ip[0] = '\0'; buf[0] = '\0'; - if (c->slave_addr || connAddrPeerName(c->conn, ip, sizeof(ip), NULL) != -1) { - char *addr = c->slave_addr ? 
c->slave_addr : ip; - if (c->slave_listening_port) - formatAddr(buf, sizeof(buf), addr, c->slave_listening_port); + if (c->replica_addr || connAddrPeerName(c->conn, ip, sizeof(ip), NULL) != -1) { + char *addr = c->replica_addr ? c->replica_addr : ip; + if (c->replica_listening_port) + formatAddr(buf, sizeof(buf), addr, c->replica_listening_port); else snprintf(buf, sizeof(buf), "%s:", addr); } else { @@ -125,7 +126,7 @@ void createReplicationBacklog(void) { /* We don't have any data inside our buffer, but virtually the first * byte we have is the next byte that will be generated for the * replication stream. */ - server.repl_backlog->offset = server.master_repl_offset + 1; + server.repl_backlog->offset = server.primary_repl_offset + 1; } /* This function is called when the user modifies the replication backlog @@ -140,7 +141,7 @@ void resizeReplicationBacklog(void) { } void freeReplicationBacklog(void) { - serverAssert(listLength(server.slaves) == 0); + serverAssert(listLength(server.replicas) == 0); if (server.repl_backlog == NULL) return; /* Decrease the start buffer node reference count. */ @@ -173,7 +174,7 @@ void createReplicationBacklogIndex(listNode *ln) { } /* Rebase replication buffer blocks' offset since the initial - * setting offset starts from 0 when master restart. */ + * setting offset starts from 0 when primary restart. */ void rebaseReplicationBuffer(long long base_repl_offset) { raxFree(server.repl_backlog->blocks_index); server.repl_backlog->blocks_index = raxNew(); @@ -200,7 +201,7 @@ int canFeedReplicaReplBuffer(client *replica) { if (replica->flags & CLIENT_REPL_RDBONLY) return 0; /* Don't feed replicas that are still waiting for BGSAVE to start. */ - if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START) return 0; + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_START) return 0; return 1; } @@ -214,11 +215,11 @@ int prepareReplicasToWrite(void) { listNode *ln; int prepared = 0; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (!canFeedReplicaReplBuffer(slave)) continue; - if (prepareClientToWrite(slave) == C_ERR) continue; + client *replica = ln->value; + if (!canFeedReplicaReplBuffer(replica)) continue; + if (prepareClientToWrite(replica) == C_ERR) continue; prepared++; } @@ -258,7 +259,7 @@ void incrementalTrimReplicationBacklog(size_t max_blocks) { /* Replicas increment the refcount of the first replication buffer block * they refer to, in that case, we don't trim the backlog even if * backlog_histlen exceeds backlog_size. This implicitly makes backlog - * bigger than our setting, but makes the master accept partial resync as + * bigger than our setting, but makes the primary accept partial resync as * much as possible. So that backlog must be the last reference of * replication buffer blocks. */ listNode *first = listFirst(server.repl_buffer_blocks); @@ -293,7 +294,7 @@ void incrementalTrimReplicationBacklog(size_t max_blocks) { } /* Set the offset of the first byte we have in the backlog. */ - server.repl_backlog->offset = server.master_repl_offset - server.repl_backlog->histlen + 1; + server.repl_backlog->offset = server.primary_repl_offset - server.repl_backlog->histlen + 1; } /* Free replication buffer blocks that are referenced by this client. 
*/ @@ -338,7 +339,7 @@ void feedReplicationBuffer(char *s, size_t len) { tail->used += copy; s += copy; len -= copy; - server.master_repl_offset += copy; + server.primary_repl_offset += copy; server.repl_backlog->histlen += copy; } if (len) { @@ -356,7 +357,7 @@ void feedReplicationBuffer(char *s, size_t len) { size_t copy = (tail->size >= len) ? len : tail->size; tail->used = copy; tail->refcount = 0; - tail->repl_offset = server.master_repl_offset + 1; + tail->repl_offset = server.primary_repl_offset + 1; tail->id = repl_block_id++; memcpy(tail->buf, s, copy); listAddNodeTail(server.repl_buffer_blocks, tail); @@ -369,27 +370,27 @@ void feedReplicationBuffer(char *s, size_t len) { } s += copy; len -= copy; - server.master_repl_offset += copy; + server.primary_repl_offset += copy; server.repl_backlog->histlen += copy; } /* For output buffer of replicas. */ listIter li; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - if (!canFeedReplicaReplBuffer(slave)) continue; + client *replica = ln->value; + if (!canFeedReplicaReplBuffer(replica)) continue; /* Update shared replication buffer start position. */ - if (slave->ref_repl_buf_node == NULL) { - slave->ref_repl_buf_node = start_node; - slave->ref_block_pos = start_pos; + if (replica->ref_repl_buf_node == NULL) { + replica->ref_repl_buf_node = start_node; + replica->ref_block_pos = start_pos; /* Only increase the start block reference count. */ ((replBufBlock *)listNodeValue(start_node))->refcount++; } /* Check output buffer limit only when add new block. */ - if (add_new_block) closeClientOnOutputBufferLimitReached(slave, 1); + if (add_new_block) closeClientOnOutputBufferLimitReached(replica, 1); } /* For replication backlog */ @@ -416,11 +417,11 @@ void feedReplicationBuffer(char *s, size_t len) { /* Propagate write commands to replication stream. * - * This function is used if the instance is a master: we use the commands + * This function is used if the instance is a primary: we use the commands * received by our clients in order to create the replication stream. * Instead if the instance is a replica and has sub-replicas attached, we use * replicationFeedStreamFromMasterStream() */ -void replicationFeedSlaves(int dictid, robj **argv, int argc) { +void replicationFeedReplicas(int dictid, robj **argv, int argc) { int j, len; char llstr[LONG_STR_SIZE]; @@ -428,32 +429,32 @@ void replicationFeedSlaves(int dictid, robj **argv, int argc) { * pass dbid=-1 that indicate there is no need to replicate `select` command. */ serverAssert(dictid == -1 || (dictid >= 0 && dictid < server.dbnum)); - /* If the instance is not a top level master, return ASAP: we'll just proxy - * the stream of data we receive from our master instead, in order to - * propagate *identical* replication stream. In this way this slave can - * advertise the same replication ID as the master (since it shares the - * master replication history and has the same backlog and offsets). */ - if (server.masterhost != NULL) return; + /* If the instance is not a top level primary, return ASAP: we'll just proxy + * the stream of data we receive from our primary instead, in order to + * propagate *identical* replication stream. In this way this replica can + * advertise the same replication ID as the primary (since it shares the + * primary replication history and has the same backlog and offsets). 
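/* Illustration (annotation, not part of the patch): the block-based accounting
 * feedReplicationBuffer() performs in the hunk above, reduced to a standalone
 * toy. Bytes are appended into fixed-size blocks and the two counters the
 * patch renames -- primary_repl_offset and the backlog histlen -- advance by
 * exactly the number of bytes copied. The block size, the static block array
 * and the missing refcount/rax index are simplifications, not the real
 * replBufBlock layout. */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16
#define MAX_BLOCKS 8

typedef struct {
    size_t used;
    char buf[BLOCK_SIZE];
} block;

static block blocks[MAX_BLOCKS];
static int nblocks = 0;
static long long primary_repl_offset = 0;
static long long backlog_histlen = 0;

static void feed(const char *s, size_t len) {
    while (len) {
        if (nblocks == 0 || blocks[nblocks - 1].used == BLOCK_SIZE) nblocks++; /* new tail block */
        block *tail = &blocks[nblocks - 1];
        size_t avail = BLOCK_SIZE - tail->used;
        size_t copy = len < avail ? len : avail;
        memcpy(tail->buf + tail->used, s, copy);
        tail->used += copy;
        s += copy;
        len -= copy;
        primary_repl_offset += copy; /* same counters the real code updates per copy */
        backlog_histlen += copy;
    }
}

int main(void) {
    const char ping[] = "*1\r\n$4\r\nPING\r\n";
    feed(ping, sizeof(ping) - 1);
    feed(ping, sizeof(ping) - 1);
    printf("offset=%lld histlen=%lld blocks=%d\n", primary_repl_offset, backlog_histlen, nblocks);
    return 0;
}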
*/ + if (server.primary_host != NULL) return; - /* If there aren't slaves, and there is no backlog buffer to populate, + /* If there aren't replicas, and there is no backlog buffer to populate, * we can return ASAP. */ - if (server.repl_backlog == NULL && listLength(server.slaves) == 0) { + if (server.repl_backlog == NULL && listLength(server.replicas) == 0) { /* We increment the repl_offset anyway, since we use that for tracking AOF fsyncs * even when there's no replication active. This code will not be reached if AOF * is also disabled. */ - server.master_repl_offset += 1; + server.primary_repl_offset += 1; return; } - /* We can't have slaves attached and no backlog. */ - serverAssert(!(listLength(server.slaves) != 0 && server.repl_backlog == NULL)); + /* We can't have replicas attached and no backlog. */ + serverAssert(!(listLength(server.replicas) != 0 && server.repl_backlog == NULL)); /* Must install write handler for all replicas first before feeding * replication stream. */ prepareReplicasToWrite(); - /* Send SELECT command to every slave if needed. */ - if (dictid != -1 && server.slaveseldb != dictid) { + /* Send SELECT command to every replica if needed. */ + if (dictid != -1 && server.replicas_eldb != dictid) { robj *selectcmd; /* For a few DBs we have pre-computed SELECT command. */ @@ -471,7 +472,7 @@ void replicationFeedSlaves(int dictid, robj **argv, int argc) { if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS) decrRefCount(selectcmd); - server.slaveseldb = dictid; + server.replicas_eldb = dictid; } /* Write the command to the replication buffer if any. */ @@ -531,12 +532,12 @@ void showLatestBacklog(void) { sdsfree(dump); } -/* This function is used in order to proxy what we receive from our master - * to our sub-slaves. */ +/* This function is used in order to proxy what we receive from our primary + * to our sub-replicas. */ #include -void replicationFeedStreamFromMasterStream(char *buf, size_t buflen) { - /* Debugging: this is handy to see the stream sent from master - * to slaves. Disabled with if(0). */ +void replicationFeedStreamFromPrimaryStream(char *buf, size_t buflen) { + /* Debugging: this is handy to see the stream sent from primary + * to replicas. Disabled with if(0). */ if (0) { printf("%zu:", buflen); for (size_t j = 0; j < buflen; j++) { @@ -545,8 +546,8 @@ void replicationFeedStreamFromMasterStream(char *buf, size_t buflen) { printf("\n"); } - /* There must be replication backlog if having attached slaves. */ - if (listLength(server.slaves)) serverAssert(server.repl_backlog != NULL); + /* There must be replication backlog if having attached replicas. */ + if (listLength(server.replicas)) serverAssert(server.repl_backlog != NULL); if (server.repl_backlog) { /* Must install write handler for all replicas first before feeding * replication stream. */ @@ -595,7 +596,7 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, decrRefCount(cmdobj); } -/* Feed the slave 'c' with the replication backlog starting from the +/* Feed the replica 'c' with the replication backlog starting from the * specified 'offset' up to the end of the backlog. */ long long addReplyReplicationBacklog(client *c, long long offset) { long long skip; @@ -662,46 +663,46 @@ long long addReplyReplicationBacklog(client *c, long long offset) { } /* Return the offset to provide as reply to the PSYNC command received - * from the slave. The returned value is only valid immediately after + * from the replica. 
The returned value is only valid immediately after * the BGSAVE process started and before executing any other command * from clients. */ long long getPsyncInitialOffset(void) { - return server.master_repl_offset; + return server.primary_repl_offset; } /* Send a FULLRESYNC reply in the specific case of a full resynchronization, - * as a side effect setup the slave for a full sync in different ways: + * as a side effect setup the replica for a full sync in different ways: * - * 1) Remember, into the slave client structure, the replication offset - * we sent here, so that if new slaves will later attach to the same + * 1) Remember, into the replica client structure, the replication offset + * we sent here, so that if new replicas will later attach to the same * background RDB saving process (by duplicating this client output - * buffer), we can get the right offset from this slave. - * 2) Set the replication state of the slave to WAIT_BGSAVE_END so that + * buffer), we can get the right offset from this replica. + * 2) Set the replication state of the replica to WAIT_BGSAVE_END so that * we start accumulating differences from this point. * 3) Force the replication stream to re-emit a SELECT statement so - * the new slave incremental differences will start selecting the + * the new replica incremental differences will start selecting the * right database number. * * Normally this function should be called immediately after a successful * BGSAVE for replication was started, or when there is one already in - * progress that we attached our slave to. */ -int replicationSetupSlaveForFullResync(client *slave, long long offset) { + * progress that we attached our replica to. */ +int replicationSetupReplicaForFullResync(client *replica, long long offset) { char buf[128]; int buflen; - slave->psync_initial_offset = offset; - slave->replstate = SLAVE_STATE_WAIT_BGSAVE_END; + replica->psync_initial_offset = offset; + replica->repl_state = REPLICA_STATE_WAIT_BGSAVE_END; /* We are going to accumulate the incremental changes for this - * slave as well. Set slaveseldb to -1 in order to force to re-emit + * replica as well. Set replicas_eldb to -1 in order to force to re-emit * a SELECT statement in the replication stream. */ - server.slaveseldb = -1; + server.replicas_eldb = -1; - /* Don't send this reply to slaves that approached us with + /* Don't send this reply to replicas that approached us with * the old SYNC command. */ - if (!(slave->flags & CLIENT_PRE_PSYNC)) { + if (!(replica->flags & CLIENT_PRE_PSYNC)) { buflen = snprintf(buf, sizeof(buf), "+FULLRESYNC %s %lld\r\n", server.replid, offset); - if (connWrite(slave->conn, buf, buflen) != buflen) { - freeClientAsync(slave); + if (connWrite(replica->conn, buf, buflen) != buflen) { + freeClientAsync(replica); return C_ERR; } } @@ -709,32 +710,32 @@ int replicationSetupSlaveForFullResync(client *slave, long long offset) { } /* This function handles the PSYNC command from the point of view of a - * master receiving a request for partial resynchronization. + * primary receiving a request for partial resynchronization. * * On success return C_OK, otherwise C_ERR is returned and we proceed * with the usual full resync. 
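/* Illustration (annotation, not part of the patch): the acceptance test that
 * primaryTryPartialResynchronization() performs below, reduced to a pure
 * function. A PSYNC offset can be served from the backlog only if the replica
 * asked for the current replid (or replid2 up to second_replid_offset) and the
 * offset still falls inside [backlog->offset, backlog->offset + histlen]; the
 * real code additionally requires the backlog to exist. Field names follow
 * the patch; the struct is a stand-in, not the real server state. */
#include <stdio.h>
#include <strings.h>

typedef struct {
    char replid[41], replid2[41];
    long long second_replid_offset;
    long long backlog_offset;  /* server.repl_backlog->offset */
    long long backlog_histlen; /* server.repl_backlog->histlen */
} primary_state;

static int can_partial_resync(const primary_state *p, const char *replid, long long offset) {
    int id_ok = strcasecmp(replid, p->replid) == 0 ||
                (strcasecmp(replid, p->replid2) == 0 && offset <= p->second_replid_offset);
    if (!id_ok) return 0; /* "?" also lands here: the replica forces a full resync */
    return offset >= p->backlog_offset && offset <= p->backlog_offset + p->backlog_histlen;
}

int main(void) {
    primary_state p = {"abc", "0000", 0, 1000, 500};
    printf("%d %d\n", can_partial_resync(&p, "abc", 1200), /* 1: inside backlog */
           can_partial_resync(&p, "abc", 200));            /* 0: already trimmed */
    return 0;
}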
*/ -int masterTryPartialResynchronization(client *c, long long psync_offset) { +int primaryTryPartialResynchronization(client *c, long long psync_offset) { long long psync_len; - char *master_replid = c->argv[1]->ptr; + char *primary_replid = c->argv[1]->ptr; char buf[128]; int buflen; - /* Is the replication ID of this master the same advertised by the wannabe - * slave via PSYNC? If the replication ID changed this master has a + /* Is the replication ID of this primary the same advertised by the wannabe + * replica via PSYNC? If the replication ID changed this primary has a * different replication history, and there is no way to continue. * * Note that there are two potentially valid replication IDs: the ID1 * and the ID2. The ID2 however is only valid up to a specific offset. */ - if (strcasecmp(master_replid, server.replid) && - (strcasecmp(master_replid, server.replid2) || psync_offset > server.second_replid_offset)) { - /* Replid "?" is used by slaves that want to force a full resync. */ - if (master_replid[0] != '?') { - if (strcasecmp(master_replid, server.replid) && strcasecmp(master_replid, server.replid2)) { + if (strcasecmp(primary_replid, server.replid) && + (strcasecmp(primary_replid, server.replid2) || psync_offset > server.second_replid_offset)) { + /* Replid "?" is used by replicas that want to force a full resync. */ + if (primary_replid[0] != '?') { + if (strcasecmp(primary_replid, server.replid) && strcasecmp(primary_replid, server.replid2)) { serverLog(LL_NOTICE, "Partial resynchronization not accepted: " "Replication ID mismatch (Replica asked for '%s', my " "replication IDs are '%s' and '%s')", - master_replid, server.replid, server.replid2); + primary_replid, server.replid, server.replid2); } else { serverLog(LL_NOTICE, "Partial resynchronization not accepted: " @@ -743,39 +744,39 @@ int masterTryPartialResynchronization(client *c, long long psync_offset) { psync_offset, server.second_replid_offset); } } else { - serverLog(LL_NOTICE, "Full resync requested by replica %s", replicationGetSlaveName(c)); + serverLog(LL_NOTICE, "Full resync requested by replica %s", replicationGetReplicaName(c)); } goto need_full_resync; } - /* We still have the data our slave is asking for? */ + /* We still have the data our replica is asking for? */ if (!server.repl_backlog || psync_offset < server.repl_backlog->offset || psync_offset > (server.repl_backlog->offset + server.repl_backlog->histlen)) { serverLog(LL_NOTICE, "Unable to partial resync with replica %s for lack of backlog (Replica request was: %lld).", - replicationGetSlaveName(c), psync_offset); - if (psync_offset > server.master_repl_offset) { - serverLog( - LL_WARNING, - "Warning: replica %s tried to PSYNC with an offset that is greater than the master replication offset.", - replicationGetSlaveName(c)); + replicationGetReplicaName(c), psync_offset); + if (psync_offset > server.primary_repl_offset) { + serverLog(LL_WARNING, + "Warning: replica %s tried to PSYNC with an offset that is greater than the primary replication " + "offset.", + replicationGetReplicaName(c)); } goto need_full_resync; } /* If we reached this point, we are able to perform a partial resync: - * 1) Set client state to make it a slave. + * 1) Set client state to make it a replica. * 2) Inform the client we can continue with +CONTINUE - * 3) Send the backlog data (from the offset to the end) to the slave. */ - c->flags |= CLIENT_SLAVE; - c->replstate = SLAVE_STATE_ONLINE; + * 3) Send the backlog data (from the offset to the end) to the replica. 
*/ + c->flags |= CLIENT_REPLICA; + c->repl_state = REPLICA_STATE_ONLINE; c->repl_ack_time = server.unixtime; c->repl_start_cmd_stream_on_ack = 0; - listAddNodeTail(server.slaves, c); + listAddNodeTail(server.replicas, c); /* We can't use the connection buffers since they are used to accumulate * new commands at this stage. But we are sure the socket send buffer is * empty so this write will never fail actually. */ - if (c->slave_capa & SLAVE_CAPA_PSYNC2) { + if (c->replica_capa & REPLICA_CAPA_PSYNC2) { buflen = snprintf(buf, sizeof(buf), "+CONTINUE %s\r\n", server.replid); } else { buflen = snprintf(buf, sizeof(buf), "+CONTINUE\r\n"); @@ -788,12 +789,12 @@ int masterTryPartialResynchronization(client *c, long long psync_offset) { serverLog( LL_NOTICE, "Partial resynchronization request from %s accepted. Sending %lld bytes of backlog starting from offset %lld.", - replicationGetSlaveName(c), psync_len, psync_offset); - /* Note that we don't need to set the selected DB at server.slaveseldb - * to -1 to force the master to emit SELECT, since the slave already - * has this state from the previous connection with the master. */ + replicationGetReplicaName(c), psync_len, psync_offset); + /* Note that we don't need to set the selected DB at server.replicas_eldb + * to -1 to force the primary to emit SELECT, since the replica already + * has this state from the previous connection with the primary. */ - refreshGoodSlavesCount(); + refreshGoodReplicasCount(); /* Fire the replica change modules event. */ moduleFireServerEvent(VALKEYMODULE_EVENT_REPLICA_CHANGE, VALKEYMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE, NULL); @@ -803,7 +804,7 @@ int masterTryPartialResynchronization(client *c, long long psync_offset) { need_full_resync: /* We need a full resync for some reason... Note that we can't * reply to PSYNC right now if a full SYNC is needed. The reply - * must include the master offset at the time the RDB file we transfer + * must include the primary offset at the time the RDB file we transfer * is generated, so we need to delay the reply to that moment. */ return C_ERR; } @@ -812,15 +813,15 @@ int masterTryPartialResynchronization(client *c, long long psync_offset) { * socket target depending on the configuration, and making sure that * the script cache is flushed before to start. * - * The mincapa argument is the bitwise AND among all the slaves capabilities - * of the slaves waiting for this BGSAVE, so represents the slave capabilities - * all the slaves support. Can be tested via SLAVE_CAPA_* macros. + * The mincapa argument is the bitwise AND among all the replicas capabilities + * of the replicas waiting for this BGSAVE, so represents the replica capabilities + * all the replicas support. Can be tested via REPLICA_CAPA_* macros. * * Side effects, other than starting a BGSAVE: * - * 1) Handle the slaves in WAIT_START state, by preparing them for a full + * 1) Handle the replicas in WAIT_START state, by preparing them for a full * sync if the BGSAVE was successfully started, or sending them an error - * and dropping them from the list of slaves. + * and dropping them from the list of replicas. * * 2) Flush the Lua scripting script cache if the BGSAVE was actually * started. @@ -832,22 +833,22 @@ int startBgsaveForReplication(int mincapa, int req) { listIter li; listNode *ln; - /* We use a socket target if slave can handle the EOF marker and we're configured to do diskless syncs. + /* We use a socket target if replica can handle the EOF marker and we're configured to do diskless syncs. 
* Note that in case we're creating a "filtered" RDB (functions-only, for example) we also force socket replication * to avoid overwriting the snapshot RDB file with filtered data. */ - socket_target = (server.repl_diskless_sync || req & SLAVE_REQ_RDB_MASK) && (mincapa & SLAVE_CAPA_EOF); + socket_target = (server.repl_diskless_sync || req & REPLICA_REQ_RDB_MASK) && (mincapa & REPLICA_CAPA_EOF); /* `SYNC` should have failed with error if we don't support socket and require a filter, assert this here */ - serverAssert(socket_target || !(req & SLAVE_REQ_RDB_MASK)); + serverAssert(socket_target || !(req & REPLICA_REQ_RDB_MASK)); serverLog(LL_NOTICE, "Starting BGSAVE for SYNC with target: %s", socket_target ? "replicas sockets" : "disk"); rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); /* Only do rdbSave* when rsiptr is not NULL, - * otherwise slave will miss repl-stream-db. */ + * otherwise replica will miss repl-stream-db. */ if (rsiptr) { if (socket_target) - retval = rdbSaveToSlavesSockets(req, rsiptr); + retval = rdbSaveToReplicasSockets(req, rsiptr); else { /* Keep the page cache since it'll get used soon */ retval = rdbSaveBackground(req, server.rdb_filename, rsiptr, RDBFLAGS_REPLICATION | RDBFLAGS_KEEP_CACHE); @@ -865,37 +866,37 @@ int startBgsaveForReplication(int mincapa, int req) { * the user enables it later with CONFIG SET, we are fine. */ if (retval == C_OK && !socket_target && server.rdb_del_sync_files) RDBGeneratedByReplication = 1; - /* If we failed to BGSAVE, remove the slaves waiting for a full - * resynchronization from the list of slaves, inform them with + /* If we failed to BGSAVE, remove the replicas waiting for a full + * resynchronization from the list of replicas, inform them with * an error about what happened, close the connection ASAP. */ if (retval == C_ERR) { serverLog(LL_WARNING, "BGSAVE for replication failed"); - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; - - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - slave->replstate = REPL_STATE_NONE; - slave->flags &= ~CLIENT_SLAVE; - listDelNode(server.slaves, ln); - addReplyError(slave, "BGSAVE failed, replication can't continue"); - slave->flags |= CLIENT_CLOSE_AFTER_REPLY; + client *replica = ln->value; + + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_START) { + replica->repl_state = REPL_STATE_NONE; + replica->flags &= ~CLIENT_REPLICA; + listDelNode(server.replicas, ln); + addReplyError(replica, "BGSAVE failed, replication can't continue"); + replica->flags |= CLIENT_CLOSE_AFTER_REPLY; } } return retval; } - /* If the target is socket, rdbSaveToSlavesSockets() already setup - * the slaves for a full resync. Otherwise for disk target do it now.*/ + /* If the target is socket, rdbSaveToReplicasSockets() already setup + * the replicas for a full resync. 
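/* Illustration (annotation, not part of the patch): the target-selection rule
 * startBgsaveForReplication() applies in the hunk above, as a tiny predicate.
 * mincapa is the bitwise AND of the waiting replicas' capability masks, so a
 * diskless (socket) transfer is only chosen when every one of them understands
 * the EOF streaming marker; a filtered-RDB request forces the socket path even
 * if diskless sync is disabled. The bit values are illustrative stand-ins for
 * the REPLICA_CAPA_* / REPLICA_REQ_* constants. */
#include <stdio.h>

#define CAPA_EOF (1 << 0)
#define CAPA_PSYNC2 (1 << 1)
#define REQ_RDB_EXCLUDE_DATA (1 << 0)
#define REQ_RDB_EXCLUDE_FUNCTIONS (1 << 1)
#define REQ_RDB_MASK (REQ_RDB_EXCLUDE_DATA | REQ_RDB_EXCLUDE_FUNCTIONS)

static int use_socket_target(int diskless_sync, int req, int mincapa) {
    return (diskless_sync || (req & REQ_RDB_MASK)) && (mincapa & CAPA_EOF);
}

int main(void) {
    int mincapa_all_eof = (CAPA_EOF | CAPA_PSYNC2) & CAPA_EOF; /* both replicas speak EOF */
    int mincapa_old = (CAPA_EOF | CAPA_PSYNC2) & 0;            /* one pre-EOF replica drops the bit */
    printf("%d\n", use_socket_target(1, 0, mincapa_all_eof));  /* 1: diskless over sockets */
    printf("%d\n", use_socket_target(1, 0, mincapa_old));      /* 0: fall back to disk BGSAVE */
    printf("%d\n", use_socket_target(0, REQ_RDB_EXCLUDE_DATA, mincapa_all_eof)); /* 1: filter forces socket */
    return 0;
}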
Otherwise for disk target do it now.*/ if (!socket_target) { - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - client *slave = ln->value; + client *replica = ln->value; - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - /* Check slave has the exact requirements */ - if (slave->slave_req != req) continue; - replicationSetupSlaveForFullResync(slave, getPsyncInitialOffset()); + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_START) { + /* Check replica has the exact requirements */ + if (replica->replica_req != req) continue; + replicationSetupReplicaForFullResync(replica, getPsyncInitialOffset()); } } } @@ -905,23 +906,23 @@ int startBgsaveForReplication(int mincapa, int req) { /* SYNC and PSYNC command implementation. */ void syncCommand(client *c) { - /* ignore SYNC if already slave or in monitor mode */ - if (c->flags & CLIENT_SLAVE) return; + /* ignore SYNC if already replica or in monitor mode */ + if (c->flags & CLIENT_REPLICA) return; /* Check if this is a failover request to a replica with the same replid and - * become a master if so. */ + * become a primary if so. */ if (c->argc > 3 && !strcasecmp(c->argv[0]->ptr, "psync") && !strcasecmp(c->argv[3]->ptr, "failover")) { serverLog(LL_NOTICE, "Failover request received for replid %s.", (unsigned char *)c->argv[1]->ptr); - if (!server.masterhost) { + if (!server.primary_host) { addReplyError(c, "PSYNC FAILOVER can't be sent to a master."); return; } if (!strcasecmp(c->argv[1]->ptr, server.replid)) { if (server.cluster_enabled) { - clusterPromoteSelfToMaster(); + clusterPromoteSelfToPrimary(); } else { - replicationUnsetMaster(); + replicationUnsetPrimary(); } sds client = catClientInfoString(sdsempty(), c); serverLog(LL_NOTICE, "MASTER MODE enabled (failover request from '%s')", client); @@ -938,9 +939,9 @@ void syncCommand(client *c) { return; } - /* Refuse SYNC requests if we are a slave but the link with our master + /* Refuse SYNC requests if we are a replica but the link with our primary * is not ok... */ - if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED) { + if (server.primary_host && server.repl_state != REPL_STATE_CONNECTED) { addReplyError(c, "-NOMASTERLINK Can't SYNC while not connected with my master"); return; } @@ -948,54 +949,54 @@ void syncCommand(client *c) { /* SYNC can't be issued when the server has pending data to send to * the client about already issued commands. We need a fresh reply * buffer registering the differences between the BGSAVE and the current - * dataset, so that we can copy to other slaves if needed. */ + * dataset, so that we can copy to other replicas if needed. */ if (clientHasPendingReplies(c)) { addReplyError(c, "SYNC and PSYNC are invalid with pending output"); return; } - /* Fail sync if slave doesn't support EOF capability but wants a filtered RDB. This is because we force filtered + /* Fail sync if replica doesn't support EOF capability but wants a filtered RDB. This is because we force filtered * RDB's to be generated over a socket and not through a file to avoid conflicts with the snapshot files. Forcing * use of a socket is handled, if needed, in `startBgsaveForReplication`. 
*/ - if (c->slave_req & SLAVE_REQ_RDB_MASK && !(c->slave_capa & SLAVE_CAPA_EOF)) { + if (c->replica_req & REPLICA_REQ_RDB_MASK && !(c->replica_capa & REPLICA_CAPA_EOF)) { addReplyError(c, "Filtered replica requires EOF capability"); return; } - serverLog(LL_NOTICE, "Replica %s asks for synchronization", replicationGetSlaveName(c)); + serverLog(LL_NOTICE, "Replica %s asks for synchronization", replicationGetReplicaName(c)); /* Try a partial resynchronization if this is a PSYNC command. * If it fails, we continue with usual full resynchronization, however - * when this happens replicationSetupSlaveForFullResync will replied + * when this happens replicationSetupReplicaForFullResync will replied * with: * * +FULLRESYNC * - * So the slave knows the new replid and offset to try a PSYNC later - * if the connection with the master is lost. */ + * So the replica knows the new replid and offset to try a PSYNC later + * if the connection with the primary is lost. */ if (!strcasecmp(c->argv[0]->ptr, "psync")) { long long psync_offset; if (getLongLongFromObjectOrReply(c, c->argv[2], &psync_offset, NULL) != C_OK) { serverLog(LL_WARNING, "Replica %s asks for synchronization but with a wrong offset", - replicationGetSlaveName(c)); + replicationGetReplicaName(c)); return; } - if (masterTryPartialResynchronization(c, psync_offset) == C_OK) { + if (primaryTryPartialResynchronization(c, psync_offset) == C_OK) { server.stat_sync_partial_ok++; return; /* No full resync needed, return. */ } else { - char *master_replid = c->argv[1]->ptr; + char *primary_replid = c->argv[1]->ptr; /* Increment stats for failed PSYNCs, but only if the - * replid is not "?", as this is used by slaves to force a full + * replid is not "?", as this is used by replicas to force a full * resync on purpose when they are not able to partially * resync. */ - if (master_replid[0] != '?') server.stat_sync_partial_err++; + if (primary_replid[0] != '?') server.stat_sync_partial_err++; } } else { - /* If a slave uses SYNC, we are dealing with an old implementation - * of the replication protocol (like valkey-cli --slave). Flag the client + /* If a replica uses SYNC, we are dealing with an old implementation + * of the replication protocol (like valkey-cli --replica). Flag the client * so that we don't expect to receive REPLCONF ACK feedbacks. */ c->flags |= CLIENT_PRE_PSYNC; } @@ -1003,16 +1004,16 @@ void syncCommand(client *c) { /* Full resynchronization. */ server.stat_sync_full++; - /* Setup the slave as one waiting for BGSAVE to start. The following code - * paths will change the state if we handle the slave differently. */ - c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; + /* Setup the replica as one waiting for BGSAVE to start. The following code + * paths will change the state if we handle the replica differently. */ + c->repl_state = REPLICA_STATE_WAIT_BGSAVE_START; if (server.repl_disable_tcp_nodelay) connDisableTcpNoDelay(c->conn); /* Non critical if it fails. */ c->repldbfd = -1; - c->flags |= CLIENT_SLAVE; - listAddNodeTail(server.slaves, c); + c->flags |= CLIENT_REPLICA; + listAddNodeTail(server.replicas, c); /* Create the replication backlog if needed. */ - if (listLength(server.slaves) == 1 && server.repl_backlog == NULL) { + if (listLength(server.replicas) == 1 && server.repl_backlog == NULL) { /* When we create the backlog from scratch, we always use a new * replication ID and clear the ID2, since there is no valid * past history. 
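/* Illustration (annotation, not part of the patch): what "use a new replication
 * ID and clear the ID2" amounts to when the first replica attaches and no
 * backlog exists yet -- the server starts a fresh history so that no stale
 * partial resync can match it. The helper names below are invented for this
 * sketch; the calls the real code makes are not shown in this hunk. */
#include <stdio.h>
#include <stdlib.h>

#define REPLID_LEN 40

static char replid[REPLID_LEN + 1];  /* current history */
static char replid2[REPLID_LEN + 1]; /* previous history, valid only up to an offset */

static void gen_hex_id(char *dst) {
    static const char hex[] = "0123456789abcdef";
    for (int i = 0; i < REPLID_LEN; i++) dst[i] = hex[rand() % 16];
    dst[REPLID_LEN] = '\0';
}

static void start_fresh_replication_history(void) {
    gen_hex_id(replid);                                    /* brand-new primary history */
    for (int i = 0; i < REPLID_LEN; i++) replid2[i] = '0'; /* ID2 cleared: no valid past */
    replid2[REPLID_LEN] = '\0';
}

int main(void) {
    start_fresh_replication_history();
    printf("replid=%s\nreplid2=%s\n", replid, replid2);
    return 0;
}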
*/ @@ -1028,30 +1029,31 @@ void syncCommand(client *c) { /* CASE 1: BGSAVE is in progress, with disk target. */ if (server.child_type == CHILD_TYPE_RDB && server.rdb_child_type == RDB_CHILD_TYPE_DISK) { /* Ok a background save is in progress. Let's check if it is a good - * one for replication, i.e. if there is another slave that is + * one for replication, i.e. if there is another replica that is * registering differences since the server forked to save. */ - client *slave; + client *replica; listNode *ln; listIter li; - listRewind(server.slaves, &li); + listRewind(server.replicas, &li); while ((ln = listNext(&li))) { - slave = ln->value; + replica = ln->value; /* If the client needs a buffer of commands, we can't use * a replica without replication buffer. */ - if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END && - (!(slave->flags & CLIENT_REPL_RDBONLY) || (c->flags & CLIENT_REPL_RDBONLY))) + if (replica->repl_state == REPLICA_STATE_WAIT_BGSAVE_END && + (!(replica->flags & CLIENT_REPL_RDBONLY) || (c->flags & CLIENT_REPL_RDBONLY))) break; } - /* To attach this slave, we check that it has at least all the - * capabilities of the slave that triggered the current BGSAVE + /* To attach this replica, we check that it has at least all the + * capabilities of the replica that triggered the current BGSAVE * and its exact requirements. */ - if (ln && ((c->slave_capa & slave->slave_capa) == slave->slave_capa) && c->slave_req == slave->slave_req) { + if (ln && ((c->replica_capa & replica->replica_capa) == replica->replica_capa) && + c->replica_req == replica->replica_req) { /* Perfect, the server is already registering differences for - * another slave. Set the right state, and copy the buffer. + * another replica. Set the right state, and copy the buffer. * We don't copy buffer if clients don't want. */ - if (!(c->flags & CLIENT_REPL_RDBONLY)) copyReplicaOutputBuffer(c, slave); - replicationSetupSlaveForFullResync(c, slave->psync_initial_offset); + if (!(c->flags & CLIENT_REPL_RDBONLY)) copyReplicaOutputBuffer(c, replica); + replicationSetupReplicaForFullResync(c, replica->psync_initial_offset); serverLog(LL_NOTICE, "Waiting for end of BGSAVE for SYNC"); } else { /* No way, we need to wait for the next BGSAVE in order to @@ -1068,16 +1070,16 @@ void syncCommand(client *c) { /* CASE 3: There is no BGSAVE is in progress. */ } else { - if (server.repl_diskless_sync && (c->slave_capa & SLAVE_CAPA_EOF) && server.repl_diskless_sync_delay) { + if (server.repl_diskless_sync && (c->replica_capa & REPLICA_CAPA_EOF) && server.repl_diskless_sync_delay) { /* Diskless replication RDB child is created inside * replicationCron() since we want to delay its start a - * few seconds to wait for more slaves to arrive. */ + * few seconds to wait for more replicas to arrive. */ serverLog(LL_NOTICE, "Delay next BGSAVE for diskless SYNC"); } else { /* We don't have a BGSAVE in progress, let's start one. Diskless * or disk-based mode is determined by replica's capacity. */ if (!hasActiveChildProcess()) { - startBgsaveForReplication(c->slave_capa, c->slave_req); + startBgsaveForReplication(c->replica_capa, c->replica_req); } else { serverLog(LL_NOTICE, "No BGSAVE in progress, but another BG operation is active. " "BGSAVE for replication delayed"); @@ -1090,7 +1092,7 @@ void syncCommand(client *c) { /* REPLCONF