diff --git a/deps/Makefile b/deps/Makefile index 3bf0363d5c..67b7d41026 100644 --- a/deps/Makefile +++ b/deps/Makefile @@ -1,4 +1,4 @@ -# Redis dependency Makefile +# Dependency Makefile uname_S:= $(shell sh -c 'uname -s 2>/dev/null || echo not') diff --git a/src/acl.c b/src/acl.c index f0668a4ecd..d9577b2124 100644 --- a/src/acl.c +++ b/src/acl.c @@ -168,7 +168,7 @@ typedef struct { * execute this command. * * If the bit for a given command is NOT set and the command has - * allowed first-args, Redis will also check allowed_firstargs in order to + * allowed first-args, the server will also check allowed_firstargs in order to * understand if the command can be executed. */ uint64_t allowed_commands[USER_COMMAND_BITS_COUNT/64]; /* allowed_firstargs is used by ACL rules to block access to a command unless a @@ -502,7 +502,7 @@ void ACLFreeUserAndKillClients(user *u) { if (c->user == u) { /* We'll free the connection asynchronously, so * in theory to set a different user is not needed. - * However if there are bugs in Redis, soon or later + * However if there are bugs in the server, soon or later * this may result in some security hole: it's much * more defensive to set the default user and put * it in non authenticated mode. */ @@ -1023,7 +1023,7 @@ aclSelector *aclCreateSelectorFromOpSet(const char *opset, size_t opsetlen) { * +@ Allow the execution of all the commands in such category * with valid categories are like @admin, @set, @sortedset, ... * and so forth, see the full list in the server.c file where - * the Redis command table is described and defined. + * the command table is described and defined. * The special category @all means all the commands, but currently * present in the server, and that will be loaded in the future * via modules. @@ -3204,7 +3204,7 @@ void addReplyCommandCategories(client *c, struct serverCommand *cmd) { } /* AUTH - * AUTH (Redis >= 6.0 form) + * AUTH (Redis OSS >= 6.0 form) * * When the user is omitted it means that we are trying to authenticate * against the default user. */ diff --git a/src/ae.c b/src/ae.c index 839d23b51d..cc67f2580e 100644 --- a/src/ae.c +++ b/src/ae.c @@ -258,7 +258,7 @@ int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id) * If there are no timers, -1 is returned. * * Note that's O(N) since time events are unsorted. - * Possible optimizations (not needed by Redis so far, but...): + * Possible optimizations (not needed so far, but...): * 1) Insert the event in order, so that the nearest is just the head. * Much better but still insertion or deletion of timers is O(N). * 2) Use a skiplist to have this operation as O(1) and insertion as O(log(N)). 
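As context for the acl.c hunk above, which describes how per-user command permissions live in the allowed_commands bitmap with allowed_firstargs consulted only when a command's bit is clear: the sketch below illustrates just the bitmap half of that check. It is not the actual acl.c code; the helper name commandBitIsSet, the standalone main(), and the command ids are invented for the example, and USER_COMMAND_BITS_COUNT is redefined locally so the snippet compiles on its own.

#include <stdint.h>
#include <stdio.h>

#define USER_COMMAND_BITS_COUNT 1024  /* local stand-in so the sketch is self-contained */

/* Return 1 if the bit for command 'id' is set in the bitmap, else 0.
 * Word index is id/64 and bit position is id%64, i.e. the layout implied by
 * "uint64_t allowed_commands[USER_COMMAND_BITS_COUNT/64]" in the hunk above. */
static int commandBitIsSet(const uint64_t *allowed_commands, uint64_t id) {
    if (id >= (uint64_t)USER_COMMAND_BITS_COUNT) return 0;
    return (allowed_commands[id / 64] >> (id % 64)) & 1;
}

int main(void) {
    uint64_t allowed_commands[USER_COMMAND_BITS_COUNT / 64] = {0};
    uint64_t some_id = 70;                                    /* hypothetical command id */
    allowed_commands[some_id / 64] |= 1ULL << (some_id % 64); /* grant that one command */

    printf("command 70 allowed: %d\n", commandBitIsSet(allowed_commands, some_id));
    printf("command 71 allowed: %d\n", commandBitIsSet(allowed_commands, 71));
    /* When the bit is clear, the server would fall back to allowed_firstargs
     * (if the command declares first-args) before rejecting the call. */
    return 0;
}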
diff --git a/src/anet.c b/src/anet.c index e4f9ecf37a..0a05fb5624 100644 --- a/src/anet.c +++ b/src/anet.c @@ -372,7 +372,7 @@ int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len, static int anetSetReuseAddr(char *err, int fd) { int yes = 1; - /* Make sure connection-intensive things like the redis benchmark + /* Make sure connection-intensive things like the benchmark tool * will be able to close/open sockets a zillion of times */ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) == -1) { anetSetError(err, "setsockopt SO_REUSEADDR: %s", strerror(errno)); @@ -388,7 +388,7 @@ static int anetCreateSocket(char *err, int domain) { return ANET_ERR; } - /* Make sure connection-intensive things like the redis benchmark + /* Make sure connection-intensive things like the benchmark tool * will be able to close/open sockets a zillion of times */ if (anetSetReuseAddr(err,s) == ANET_ERR) { close(s); diff --git a/src/aof.c b/src/aof.c index ab24770b9a..a6a89c931b 100644 --- a/src/aof.c +++ b/src/aof.c @@ -59,11 +59,11 @@ void aof_background_fsync_and_close(int fd); * * Append-only files consist of three types: * * - * BASE: Represents a Redis snapshot from the time of last AOF rewrite. The manifest + * BASE: Represents a server snapshot from the time of last AOF rewrite. The manifest * file contains at most a single BASE file, which will always be the first file in the * list. * - * INCR: Represents all write commands executed by Redis following the last successful + * INCR: Represents all write commands executed by the server following the last successful * AOF rewrite. In some cases it is possible to have several ordered INCR files. For * example: * - During an on-going AOF rewrite @@ -119,7 +119,7 @@ aofInfo *aofInfoDup(aofInfo *orig) { /* Format aofInfo as a string and it will be a line in the manifest. * - * When update this format, make sure to update redis-check-aof as well. */ + * When updating this format, make sure to update valkey-check-aof as well. */ sds aofInfoFormat(sds buf, aofInfo *ai) { sds filename_repr = NULL; @@ -222,10 +222,10 @@ sds getAofManifestAsString(aofManifest *am) { } /* Load the manifest information from the disk to `server.aof_manifest` - * when the Redis server start. + * when the server starts. * * During loading, this function does strict error checking and will abort - * the entire Redis server process on error (I/O error, invalid format, etc.) + * the entire server process on error (I/O error, invalid format, etc.) * * If the AOF directory or manifest file do not exist, this will be ignored * in order to support seamless upgrades from previous versions which did not @@ -253,7 +253,7 @@ void aofLoadManifestFromDisk(void) { sdsfree(am_filepath); } -/* Generic manifest loading function, used in `aofLoadManifestFromDisk` and redis-check-aof tool. */ +/* Generic manifest loading function, used in `aofLoadManifestFromDisk` and valkey-check-aof tool. */ #define MANIFEST_MAX_LINE 1024 aofManifest *aofLoadManifestFromFile(sds am_filepath) { const char *err = NULL; @@ -609,7 +609,7 @@ int persistAofManifest(aofManifest *am) { return ret; } -/* Called in `loadAppendOnlyFiles` when we upgrade from a old version redis. +/* Called in `loadAppendOnlyFiles` when we upgrade from an old version of the server. * * 1) Create AOF directory use 'server.aof_dirname' as the name.
* 2) Use 'server.aof_filename' to construct a BASE type aofInfo and add it to @@ -617,7 +617,7 @@ int persistAofManifest(aofManifest *am) { * 3) Move the old AOF file (server.aof_filename) to AOF directory. * * If any of the above steps fails or crash occurs, this will not cause any - * problems, and redis will retry the upgrade process when it restarts. + * problems, and the server will retry the upgrade process when it restarts. */ void aofUpgradePrepare(aofManifest *am) { serverAssert(!aofFileExist(server.aof_filename)); @@ -704,13 +704,13 @@ void aofDelTempIncrAofFile(void) { return; } -/* Called after `loadDataFromDisk` when redis start. If `server.aof_state` is +/* Called after `loadDataFromDisk` when the server starts. If `server.aof_state` is * 'AOF_ON', It will do three things: - * 1. Force create a BASE file when redis starts with an empty dataset + * 1. Force create a BASE file when the server starts with an empty dataset * 2. Open the last opened INCR type AOF for writing, If not, create a new one * 3. Synchronously update the manifest file to the disk * - * If any of the above steps fails, the redis process will exit. + * If any of the above steps fails, the server process will exit. */ void aofOpenIfNeededOnServerStart(void) { if (server.aof_state != AOF_ON) { @@ -856,7 +856,7 @@ int openNewIncrAofForAppend(void) { /* Whether to limit the execution of Background AOF rewrite. * - * At present, if AOFRW fails, redis will automatically retry. If it continues + * At present, if AOFRW fails, the server will automatically retry. If it continues * to fail, we may get a lot of very small INCR files. so we need an AOFRW * limiting measure. * @@ -1371,7 +1371,7 @@ void feedAppendOnlyFile(int dictid, robj **argv, int argc) { * AOF loading * ------------------------------------------------------------------------- */ -/* In Redis commands are always executed in the context of a client, so in +/* Commands are always executed in the context of a client, so in * order to load the append only file we need to create a fake client. */ struct client *createAOFClient(void) { struct client *c = createClient(NULL); @@ -1390,7 +1390,7 @@ struct client *createAOFClient(void) { c->flags = CLIENT_DENY_BLOCKING; /* We set the fake client as a slave waiting for the synchronization - * so that Redis will not try to send replies to this client. */ + * so that the server will not try to send replies to this client. */ c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; return c; } @@ -1664,7 +1664,7 @@ int loadAppendOnlyFiles(aofManifest *am) { int total_num, aof_num = 0, last_file; /* If the 'server.aof_filename' file exists in dir, we may be starting - * from an old redis version. We will use enter upgrade mode in three situations. + * from an old server version. We will use enter upgrade mode in three situations. * * 1. If the 'server.aof_dirname' directory not exist * 2. If the 'server.aof_dirname' directory exists but the manifest file is missing @@ -1954,7 +1954,7 @@ int rewriteSortedSetObject(rio *r, robj *key, robj *o) { } /* Write either the key or the value of the currently selected item of a hash. - * The 'hi' argument passes a valid Redis hash iterator. + * The 'hi' argument passes a valid hash iterator. * The 'what' filed specifies if to write a key or a value and can be * either OBJ_HASH_KEY or OBJ_HASH_VALUE. 
* @@ -2208,7 +2208,7 @@ int rewriteStreamObject(rio *r, robj *key, robj *o) { } /* Call the module type callback in order to rewrite a data type - * that is exported by a module and is not handled by Redis itself. + * that is exported by a module and is not handled by the server itself. * The function returns 0 on error, 1 on success. */ int rewriteModuleObject(rio *r, robj *key, robj *o, int dbid) { ValkeyModuleIO io; @@ -2347,7 +2347,7 @@ int rewriteAppendOnlyFileRio(rio *aof) { * "filename". Used both by REWRITEAOF and BGREWRITEAOF. * * In order to minimize the number of commands needed in the rewritten - * log Redis uses variadic commands when possible, such as RPUSH, SADD + * log, the server uses variadic commands when possible, such as RPUSH, SADD * and ZADD. However at max AOF_REWRITE_ITEMS_PER_CMD items per time * are inserted using a single command. */ int rewriteAppendOnlyFile(char *filename) { @@ -2419,7 +2419,7 @@ int rewriteAppendOnlyFile(char *filename) { /* This is how rewriting of the append only file in background works: * * 1) The user calls BGREWRITEAOF - * 2) Redis calls this function, that forks(): + * 2) The server calls this function, that forks(): * 2a) the child rewrite the append only file in a temp file. * 2b) the parent open a new INCR AOF file to continue writing. * 3) When the child finished '2a' exists. diff --git a/src/atomicvar.h b/src/atomicvar.h index b506c5d29e..17d1c15a6d 100644 --- a/src/atomicvar.h +++ b/src/atomicvar.h @@ -83,13 +83,13 @@ /* Define serverAtomic for atomic variable. */ #define serverAtomic -/* To test Redis with Helgrind (a Valgrind tool) it is useful to define +/* To test the server with Helgrind (a Valgrind tool) it is useful to define * the following macro, so that __sync macros are used: those can be detected * by Helgrind (even if they are less efficient) so that no false positive * is reported. */ // #define __ATOMIC_VAR_FORCE_SYNC_MACROS -/* There will be many false positives if we test Redis with Helgrind, since +/* There will be many false positives if we test the server with Helgrind, since * Helgrind can't understand we have imposed ordering on the program, so * we use macros in helgrind.h to tell Helgrind inter-thread happens-before * relationship explicitly for avoiding false positives. diff --git a/src/bio.c b/src/bio.c index da62222426..c6b4d77c0e 100644 --- a/src/bio.c +++ b/src/bio.c @@ -1,4 +1,4 @@ -/* Background I/O service for Redis. +/* Background I/O service for the server. * * This file implements operations that we need to perform in the background. * Currently there is only a single operation, that is a background close(2) @@ -8,7 +8,7 @@ * * In the future we'll either continue implementing new things we need or * we'll switch to libeio. However there are probably long term uses for this - * file as we may want to put here Redis specific background tasks (for instance + * file as we may want to put here server specific background tasks (for instance * it is not impossible that we'll need a non blocking FLUSHDB/FLUSHALL * implementation). * @@ -323,7 +323,7 @@ void bioDrainWorker(int job_type) { /* Kill the running bio threads in an unclean way. This function should be * used only when it's critical to stop the threads for some reason. - * Currently Redis does this only on crash (for instance on SIGSEGV) in order + * Currently the server does this only on crash (for instance on SIGSEGV) in order * to perform a fast memory check without other threads messing with memory. 
*/ void bioKillThreads(void) { int err; diff --git a/src/bitops.c b/src/bitops.c index 5b97c033aa..611ab931b4 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -406,7 +406,7 @@ void printBits(unsigned char *p, unsigned long count) { /* This helper function used by GETBIT / SETBIT parses the bit offset argument * making sure an error is returned if it is negative or if it overflows - * Redis 512 MB limit for the string value or more (server.proto_max_bulk_len). + * the server's 512 MB limit for the string value or more (server.proto_max_bulk_len). * * If the 'hash' argument is true, and 'bits is positive, then the command * will also parse bit offsets prefixed by "#". In such a case the offset @@ -443,7 +443,7 @@ int getBitOffsetFromArgument(client *c, robj *o, uint64_t *offset, int hash, int /* This helper function for BITFIELD parses a bitfield type in the form * where sign is 'u' or 'i' for unsigned and signed, and * the bits is a value between 1 and 64. However 64 bits unsigned integers - * are reported as an error because of current limitations of Redis protocol + * are reported as an error because of current limitations of RESP * to return unsigned integer values greater than INT64_MAX. * * On error C_ERR is returned and an error is sent to the client. */ diff --git a/src/blocked.c b/src/blocked.c index aeac106706..ad815a9b6c 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -292,7 +292,7 @@ void disconnectAllBlockedClients(void) { } } -/* This function should be called by Redis every time a single command, +/* This function should be called by the server every time a single command, * a MULTI/EXEC block, or a Lua script, terminated its execution after * being called by a client. It handles serving clients blocked in all scenarios * where a specific key access requires to block until that key is available. @@ -310,7 +310,7 @@ void disconnectAllBlockedClients(void) { * do client side, indeed!). Because mismatching clients (blocking for * a different type compared to the current key type) are moved in the * other side of the linked list. However as long as the key starts to - * be used only for a single type, like virtually any Redis application will + * be used only for a single type, like virtually any application will * do, the function is already fair. */ void handleClientsBlockedOnKeys(void) { diff --git a/src/call_reply.c b/src/call_reply.c index 0afaf4469e..9e910f50c6 100644 --- a/src/call_reply.c +++ b/src/call_reply.c @@ -525,7 +525,7 @@ list *callReplyDeferredErrorList(CallReply *rep) { * callReplyGetPrivateData(). * * NOTE: The parser used for parsing the reply and producing CallReply is - * designed to handle valid replies created by Redis itself. IT IS NOT + * designed to handle valid replies created by the server itself. IT IS NOT * DESIGNED TO HANDLE USER INPUT and using it to parse invalid replies is * unsafe. */ diff --git a/src/cli_commands.h b/src/cli_commands.h index 4642272eb4..5669d2cf4b 100644 --- a/src/cli_commands.h +++ b/src/cli_commands.h @@ -1,6 +1,6 @@ -/* This file is used by redis-cli in place of server.h when including commands.c +/* This file is used by valkey-cli in place of server.h when including commands.c * It contains alternative structs which omit the parts of the commands table - * that are not suitable for redis-cli, e.g. the command proc. */ + * that are not suitable for valkey-cli, e.g. the command proc. 
*/ #ifndef VALKEY_CLI_COMMANDS_H #define VALKEY_CLI_COMMANDS_H diff --git a/src/cluster.c b/src/cluster.c index ca002a17bb..99c02cd86d 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -30,7 +30,7 @@ /* * cluster.c contains the common parts of a clustering * implementation, the parts that are shared between - * any implementation of Redis clustering. + * any implementation of clustering. */ #include "server.h" @@ -142,7 +142,7 @@ void createDumpPayload(rio *payload, robj *o, robj *key, int dbid) { payload->io.buffer.ptr = sdscatlen(payload->io.buffer.ptr,&crc,8); } -/* Verify that the RDB version of the dump payload matches the one of this Redis +/* Verify that the RDB version of the dump payload matches the one of this * instance and that the checksum is ok. * If the DUMP payload looks valid C_OK is returned, otherwise C_ERR * is returned. If rdbver_ptr is not NULL, its populated with the value read @@ -173,7 +173,7 @@ int verifyDumpPayload(unsigned char *p, size_t len, uint16_t *rdbver_ptr) { } /* DUMP keyname - * DUMP is actually not used by Redis Cluster but it is the obvious + * DUMP is actually not used by Cluster but it is the obvious * complement of RESTORE and can be useful for different applications. */ void dumpCommand(client *c) { robj *o; @@ -687,7 +687,7 @@ void migrateCommand(client *c) { if (!copy) { /* Translate MIGRATE as DEL for replication/AOF. Note that we do * this only for the keys for which we received an acknowledgement - * from the receiving Redis server, by using the del_idx index. */ + * from the receiving server, by using the del_idx index. */ if (del_idx > 1) { newargv[0] = createStringObject("DEL",3); /* Note that the following call takes ownership of newargv. */ @@ -1007,7 +1007,7 @@ clusterNode *getNodeByQuery(client *c, struct serverCommand *cmd, robj **argv, i /* Set error code optimistically for the base case. */ if (error_code) *error_code = CLUSTER_REDIR_NONE; - /* Modules can turn off Redis Cluster redirection: this is useful + /* Modules can turn off Cluster redirection: this is useful * when writing a module that implements a completely different * distributed system. */ @@ -1446,7 +1446,7 @@ void clusterCommandSlots(client * c) { /* The ASKING command is required after a -ASK redirection. * The client should issue ASKING before to actually send the command to - * the target instance. See the Redis Cluster specification for more + * the target instance. See the Cluster specification for more * information. */ void askingCommand(client *c) { if (server.cluster_enabled == 0) { diff --git a/src/cluster.h b/src/cluster.h index 463b4940d9..a7211615dd 100644 --- a/src/cluster.h +++ b/src/cluster.h @@ -2,7 +2,7 @@ #define __CLUSTER_H /*----------------------------------------------------------------------------- - * Redis cluster exported API. + * Cluster exported API. *----------------------------------------------------------------------------*/ #define CLUSTER_SLOT_MASK_BITS 14 /* Number of bits used for slot id. */ @@ -25,9 +25,9 @@ typedef struct _clusterNode clusterNode; struct clusterState; -/* Flags that a module can set in order to prevent certain Redis Cluster +/* Flags that a module can set in order to prevent certain Cluster * features to be enabled. Useful when implementing a different distributed - * system on top of Redis Cluster message bus, using modules. */ + * system on top of Cluster message bus, using modules. 
*/ #define CLUSTER_MODULE_FLAG_NONE 0 #define CLUSTER_MODULE_FLAG_NO_FAILOVER (1<<1) #define CLUSTER_MODULE_FLAG_NO_REDIRECTION (1<<2) diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index 5874433d7d..01c531aaa0 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -29,7 +29,7 @@ /* * cluster_legacy.c contains the implementation of the cluster API that is - * specific to the standard, Redis cluster-bus based clustering mechanism. + * specific to the standard, cluster-bus based clustering mechanism. */ #include "server.h" @@ -169,7 +169,7 @@ dictType clusterSdsToListType = { NULL /* allow to expand */ }; -/* Aux fields are introduced in Redis 7.2 to support the persistence +/* Aux fields were introduced in Redis OSS 7.2 to support the persistence * of various important node properties, such as shard id, in nodes.conf. * Aux fields take an explicit format of name=value pairs and have no * intrinsic order among them. Aux fields are always grouped together @@ -349,7 +349,7 @@ int clusterLoadConfig(char *filename) { /* Parse the file. Note that single lines of the cluster config file can * be really long as they include all the hash slots of the node. - * This means in the worst possible case, half of the Redis slots will be + * This means in the worst possible case, half of the slots will be * present in a single line, possibly in importing or migrating state, so * together with the node ID of the sender/receiver. * @@ -372,7 +372,7 @@ int clusterLoadConfig(char *filename) { if (argv == NULL) goto fmterr; /* Handle the special "vars" line. Don't pretend it is the last - * line even if it actually is when generated by Redis. */ + * line even if it actually is when generated by the server. */ if (strcasecmp(argv[0],"vars") == 0) { if (!(argc % 2)) goto fmterr; for (j = 1; j < argc; j += 2) { @@ -583,7 +583,7 @@ int clusterLoadConfig(char *filename) { } else if (auxFieldHandlers[af_shard_id].isPresent(n) == 0) { /* n is a primary but it does not have a persisted shard_id. * This happens if we are loading a nodes.conf generated by - * an older version of Redis. We should manually update the + * an older version of the server. We should manually update the * shard membership in this case */ clusterAddNodeToShard(n->shard_id, n); } @@ -1450,6 +1450,10 @@ int clusterNodeFailureReportsCount(clusterNode *node) { return listLength(node->fail_reports); } +static int clusterNodeNameComparator(const void *node1, const void *node2) { + return strncasecmp((*(clusterNode **)node1)->name, (*(clusterNode **)node2)->name, CLUSTER_NAMELEN); +} + int clusterNodeRemoveSlave(clusterNode *master, clusterNode *slave) { int j; @@ -1479,6 +1483,7 @@ int clusterNodeAddSlave(clusterNode *master, clusterNode *slave) { sizeof(clusterNode*)*(master->numslaves+1)); master->slaves[master->numslaves] = slave; master->numslaves++; + qsort(master->slaves, master->numslaves, sizeof(clusterNode *), clusterNodeNameComparator); master->flags |= CLUSTER_NODE_MIGRATE_TO; return C_OK; } @@ -1586,7 +1591,7 @@ clusterNode *clusterLookupNode(const char *name, int length) { * Note that the list returned is not computed on the fly * via slaveof; rather, it is maintained permanently to * track the shard membership and its life cycle is tied - * to this Redis process. Therefore, the caller must not + * to this process. Therefore, the caller must not * release the list. 
*/ list *clusterGetNodesInMyShard(clusterNode *node) { sds s = sdsnewlen(node->shard_id, CLUSTER_NAMELEN); @@ -1680,7 +1685,7 @@ uint64_t clusterGetMaxEpoch(void) { * * Important note: this function violates the principle that config epochs * should be generated with consensus and should be unique across the cluster. - * However Redis Cluster uses this auto-generated new config epochs in two + * However the cluster uses this auto-generated new config epochs in two * cases: * * 1) When slots are closed after importing. Otherwise resharding would be @@ -1689,7 +1694,7 @@ uint64_t clusterGetMaxEpoch(void) { * failover its master even if there is not master majority able to * create a new configuration epoch. * - * Redis Cluster will not explode using this function, even in the case of + * The cluster will not explode using this function, even in the case of * a collision between this node and another node, generating the same * configuration epoch unilaterally, because the config epoch conflict * resolution algorithm will eventually move colliding nodes to different @@ -1790,7 +1795,7 @@ void clusterHandleConfigEpochCollision(clusterNode *sender) { * about the node we want to remove, we don't re-add it before some time. * * Currently the CLUSTER_BLACKLIST_TTL is set to 1 minute, this means - * that redis-cli has 60 seconds to send CLUSTER FORGET messages to nodes + * that valkey-cli has 60 seconds to send CLUSTER FORGET messages to nodes * in the cluster without dealing with the problem of other nodes re-adding * back the node to nodes we already sent the FORGET command to. * @@ -2356,7 +2361,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc } /* The slot is in importing state, it should be modified only - * manually via redis-cli (example: a resharding is in progress + * manually via valkey-cli (example: a resharding is in progress * and the migrating side slot was already closed and is advertising * a new config. We still want the slot to be closed manually). */ if (server.cluster->importing_slots_from[j]) continue; @@ -2401,7 +2406,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc } /* After updating the slots configuration, don't do any actual change - * in the state of the server if a module disabled Redis Cluster + * in the state of the server if a module disabled Cluster * keys redirections. */ if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return; @@ -2598,9 +2603,7 @@ uint32_t writePingExt(clusterMsg *hdr, int gossipcount) { extensions++; if (hdr != NULL) { - if (extensions != 0) { - hdr->mflags[0] |= CLUSTERMSG_FLAG0_EXT_DATA; - } + hdr->mflags[0] |= CLUSTERMSG_FLAG0_EXT_DATA; hdr->extensions = htons(extensions); } @@ -2791,6 +2794,10 @@ int clusterProcessPacket(clusterLink *link) { sender = getNodeFromLinkAndMsg(link, hdr); + if (sender && (hdr->mflags[0] & CLUSTERMSG_FLAG0_EXT_DATA)) { + sender->flags |= CLUSTER_NODE_EXTENSIONS_SUPPORTED; + } + /* Update the last time we saw any data from this node. We * use this in order to avoid detecting a timeout from a node that * is just sending a lot of data in the cluster bus, for instance @@ -3633,7 +3640,9 @@ void clusterSendPing(clusterLink *link, int type) { * to put inside the packet. 
*/ estlen = sizeof(clusterMsg) - sizeof(union clusterMsgData); estlen += (sizeof(clusterMsgDataGossip)*(wanted + pfail_wanted)); - estlen += writePingExt(NULL, 0); + if (link->node && nodeSupportsExtensions(link->node)) { + estlen += writePingExt(NULL, 0); + } /* Note: clusterBuildMessageHdr() expects the buffer to be always at least * sizeof(clusterMsg) or more. */ if (estlen < (int)sizeof(clusterMsg)) estlen = sizeof(clusterMsg); @@ -3703,7 +3712,13 @@ void clusterSendPing(clusterLink *link, int type) { /* Compute the actual total length and send! */ uint32_t totlen = 0; - totlen += writePingExt(hdr, gossipcount); + + if (link->node && nodeSupportsExtensions(link->node)) { + totlen += writePingExt(hdr, gossipcount); + } else { + serverLog(LL_DEBUG, "Unable to send extensions data, however setting ext data flag to true"); + hdr->mflags[0] |= CLUSTERMSG_FLAG0_EXT_DATA; + } totlen += sizeof(clusterMsg)-sizeof(union clusterMsgData); totlen += (sizeof(clusterMsgDataGossip)*gossipcount); serverAssert(gossipcount < USHRT_MAX); @@ -3717,7 +3732,7 @@ void clusterSendPing(clusterLink *link, int type) { /* Send a PONG packet to every connected node that's not in handshake state * and for which we have a valid link. * - * In Redis Cluster pongs are not used just for failure detection, but also + * In Cluster mode, pongs are not used just for failure detection, but also * to carry important configuration information. So broadcasting a pong is * useful when something changes in the configuration and we want to make * the cluster aware ASAP (for instance after a slave promotion). @@ -3956,7 +3971,7 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { int j; /* IF we are not a master serving at least 1 slot, we don't have the - * right to vote, as the cluster size in Redis Cluster is the number + * right to vote, as the cluster size is the number * of masters serving at least one slot, and quorum is the cluster * size + 1 */ if (nodeIsSlave(myself) || myself->numslots == 0) return; @@ -5140,7 +5155,7 @@ void clusterUpdateState(void) { * B) If according to our config other nodes are already in charge for * this slots, we set the slots as IMPORTING from our point of view * in order to justify we have those slots, and in order to make - * redis-cli aware of the issue, so that it can try to fix it. + * valkey-cli aware of the issue, so that it can try to fix it. * 2) If we find data in a DB different than DB0 we return C_ERR to * signal the caller it should quit the server with an error message * or take other actions. diff --git a/src/cluster_legacy.h b/src/cluster_legacy.h index a857184ab3..9caf07bae3 100644 --- a/src/cluster_legacy.h +++ b/src/cluster_legacy.h @@ -51,6 +51,7 @@ typedef struct clusterLink { #define CLUSTER_NODE_MEET 128 /* Send a MEET message to this node */ #define CLUSTER_NODE_MIGRATE_TO 256 /* Master eligible for replica migration. */ #define CLUSTER_NODE_NOFAILOVER 512 /* Slave will not try to failover. */ +#define CLUSTER_NODE_EXTENSIONS_SUPPORTED 1024 /* This node supports extensions. 
*/ #define CLUSTER_NODE_NULL_NAME "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" #define nodeIsSlave(n) ((n)->flags & CLUSTER_NODE_SLAVE) @@ -59,6 +60,7 @@ typedef struct clusterLink { #define nodeTimedOut(n) ((n)->flags & CLUSTER_NODE_PFAIL) #define nodeFailed(n) ((n)->flags & CLUSTER_NODE_FAIL) #define nodeCantFailover(n) ((n)->flags & CLUSTER_NODE_NOFAILOVER) +#define nodeSupportsExtensions(n) ((n)->flags & CLUSTER_NODE_EXTENSIONS_SUPPORTED) /* This structure represent elements of node->fail_reports. */ typedef struct clusterNodeFailReport { @@ -66,7 +68,7 @@ typedef struct clusterNodeFailReport { mstime_t time; /* Time of the last report from this node. */ } clusterNodeFailReport; -/* Redis cluster messages header */ +/* Cluster messages header */ /* Message types. * @@ -204,7 +206,7 @@ union clusterMsgData { #define CLUSTER_PROTO_VER 1 /* Cluster bus protocol version. */ typedef struct { - char sig[4]; /* Signature "RCmb" (Redis Cluster message bus). */ + char sig[4]; /* Signature "RCmb" (Cluster message bus). */ uint32_t totlen; /* Total length of this message */ uint16_t ver; /* Protocol version, currently set to 1. */ uint16_t port; /* Primary port number (TCP or TLS). */ @@ -231,8 +233,8 @@ typedef struct { union clusterMsgData data; } clusterMsg; -/* clusterMsg defines the gossip wire protocol exchanged among Redis cluster - * members, which can be running different versions of redis-server bits, +/* clusterMsg defines the gossip wire protocol exchanged among cluster + * members, which can be running different versions of server bits, * especially during cluster rolling upgrades. * * Therefore, fields in this struct should remain at the same offset from diff --git a/src/commands.def b/src/commands.def index 7007568b61..bd6ed38153 100644 --- a/src/commands.def +++ b/src/commands.def @@ -3,7 +3,7 @@ /* We have fabulous commands from * the fantastic - * Redis Command Table! */ + * Command Table! */ /* Must match serverCommandGroup */ const char *COMMAND_GROUP_STR[] = { diff --git a/src/config.c b/src/config.c index e4a0c7c459..534ae355b1 100644 --- a/src/config.c +++ b/src/config.c @@ -1919,7 +1919,7 @@ static int sdsConfigSet(standardConfig *config, sds *argv, int argc, const char /* if prev and new configuration are not equal, set the new one */ if (new != prev && (new == NULL || prev == NULL || sdscmp(prev, new))) { /* If MODULE_CONFIG flag is set, then free temporary prev getModuleStringConfig returned. - * Otherwise, free the actual previous config value Redis held (Same action, different reasons) */ + * Otherwise, free the actual previous config value the server held (Same action, different reasons) */ sdsfree(prev); if (config->flags & MODULE_CONFIG) { @@ -2585,7 +2585,7 @@ int updateRequirePass(const char **err) { /* The old "requirepass" directive just translates to setting * a password to the default user. The only thing we do * additionally is to remember the cleartext password in this - * case, for backward compatibility with Redis <= 5. */ + * case, for backward compatibility. 
*/ ACLUpdateDefaultUserPassword(server.requirepass); return 1; } diff --git a/src/config.h b/src/config.h index 9e2db5ac6c..e550da00f0 100644 --- a/src/config.h +++ b/src/config.h @@ -225,7 +225,7 @@ void setproctitle(const char *fmt, ...); /* Sometimes after including an OS-specific header that defines the * endianness we end with __BYTE_ORDER but not with BYTE_ORDER that is what - * the Redis code uses. In this case let's define everything without the + * the server code uses. In this case let's define everything without the * underscores. */ #ifndef BYTE_ORDER #ifdef __BYTE_ORDER diff --git a/src/connection.h b/src/connection.h index ac48214237..f50cd89d1c 100644 --- a/src/connection.h +++ b/src/connection.h @@ -290,7 +290,7 @@ static inline int connAddr(connection *conn, char *ip, size_t ip_len, int *port, /* Format an IP,port pair into something easy to parse. If IP is IPv6 * (matches for ":"), the ip is surrounded by []. IP and port are just - * separated by colons. This the standard to display addresses within Redis. */ + * separated by colons. This the standard to display addresses within the server. */ static inline int formatAddr(char *buf, size_t buf_len, char *ip, int port) { return snprintf(buf, buf_len, strchr(ip,':') ? "[%s]:%d" : "%s:%d", ip, port); @@ -378,10 +378,10 @@ static inline sds connGetPeerCert(connection *conn) { return NULL; } -/* Initialize the redis connection framework */ +/* Initialize the connection framework */ int connTypeInitialize(void); -/* Register a connection type into redis connection framework */ +/* Register a connection type into the connection framework */ int connTypeRegister(ConnectionType *ct); /* Lookup a connection type by type name */ diff --git a/src/crc16_slottable.h b/src/crc16_slottable.h index f25e2412e8..84e2aa0280 100644 --- a/src/crc16_slottable.h +++ b/src/crc16_slottable.h @@ -1,11 +1,11 @@ #ifndef _CRC16_TABLE_H__ #define _CRC16_TABLE_H__ -/* A table of the shortest possible alphanumeric string that is mapped by redis' crc16 - * to any given redis cluster slot. +/* A table of the shortest possible alphanumeric string that is mapped by crc16 + * to any given cluster slot. * * The array indexes are slot numbers, so that given a desired slot, this string is guaranteed - * to make redis cluster route a request to the shard holding this slot + * to make the cluster route a request to the shard holding this slot */ typedef char crc16_alphastring[4]; diff --git a/src/db.c b/src/db.c index 886f27c0b9..17e74f5881 100644 --- a/src/db.c +++ b/src/db.c @@ -343,7 +343,7 @@ void setKey(client *c, serverDb *db, robj *key, robj *val, int flags) { if (!(flags & SETKEY_NO_SIGNAL)) signalModifiedKey(c,db,key); } -/* Return a random key, in form of a Redis object. +/* Return a random key, in form of an Object. * If there are no keys, NULL is returned. * * The function makes sure to return keys not already expired. */ @@ -427,7 +427,7 @@ int dbAsyncDelete(serverDb *db, robj *key) { return dbGenericDelete(db, key, 1, DB_FLAG_KEY_DELETED); } -/* This is a wrapper whose behavior depends on the Redis lazy free +/* This is a wrapper whose behavior depends on the server lazy free * configuration. Deletes the key synchronously or asynchronously. */ int dbDelete(serverDb *db, robj *key) { return dbGenericDelete(db, key, server.lazyfree_lazy_server_del, DB_FLAG_KEY_DELETED); @@ -507,11 +507,11 @@ long long emptyDbStructure(serverDb *dbarray, int dbnum, int async, } /* Remove all data (keys and functions) from all the databases in a - * Redis server. 
If callback is given the function is called from + * server. If callback is given the function is called from * time to time to signal that work is in progress. * * The dbnum can be -1 if all the DBs should be flushed, or the specified - * DB number if we want to flush only a single Redis database number. + * DB number if we want to flush only a single database number. * * Flags are be EMPTYDB_NO_FLAGS if no special flags are specified or * EMPTYDB_ASYNC if we want the memory to be freed in a different thread @@ -542,7 +542,7 @@ long long emptyData(int dbnum, int flags, void(callback)(dict*)) { * there. */ signalFlushedDb(dbnum, async); - /* Empty redis database structure. */ + /* Empty the database structure. */ removed = emptyDbStructure(server.db, dbnum, async, callback); if (dbnum == -1) flushSlaveKeysWithExpireList(); @@ -695,7 +695,7 @@ void flushAllDataAndResetRDB(int flags) { /* FLUSHDB [ASYNC] * - * Flushes the currently SELECTed Redis DB. */ + * Flushes the currently SELECTed DB. */ void flushdbCommand(client *c) { int flags; @@ -885,7 +885,7 @@ void scanCallback(void *privdata, const dictEntry *de) { serverAssert(!((data->type != LLONG_MAX) && o)); /* Filter an element if it isn't the type we want. */ - /* TODO: uncomment in redis 8.0 + /* TODO: uncomment in version 8.0 if (!o && data->type != LLONG_MAX) { robj *rval = dictGetVal(de); if (!objectTypeCompare(rval, data->type)) return; @@ -1030,7 +1030,7 @@ void scanGenericCommand(client *c, robj *o, unsigned long long cursor) { typename = c->argv[i+1]->ptr; type = getObjectTypeByName(typename); if (type == LLONG_MAX) { - /* TODO: uncomment in redis 8.0 + /* TODO: uncomment in version 8.0 addReplyErrorFormat(c, "unknown type name '%s'", typename); return; */ } @@ -1185,7 +1185,7 @@ void scanGenericCommand(client *c, robj *o, unsigned long long cursor) { sds key = listNodeValue(ln); initStaticStringObject(kobj, key); /* Filter an element if it isn't the type we want. */ - /* TODO: remove this in redis 8.0 */ + /* TODO: remove this in version 8.0 */ if (typename) { robj* typecheck = lookupKeyReadWithFlags(c->db, &kobj, LOOKUP_NOTOUCH|LOOKUP_NONOTIFY); if (!typecheck || !objectTypeCompare(typecheck, type)) { @@ -1565,7 +1565,7 @@ void scanDatabaseForDeletedKeys(serverDb *emptied, serverDb *replaced_with) { * the new database even if already connected. Note that the client * structure c->db points to a given DB, so we need to be smarter and * swap the underlying referenced structures, otherwise we would need - * to fix all the references to the Redis DB structure. + * to fix all the references to the DB structure. * * Returns C_ERR if at least one of the DB ids are out of range, otherwise * C_OK is returned. */ @@ -2109,7 +2109,7 @@ int getKeysUsingKeySpecs(struct serverCommand *cmd, robj **argv, int argc, int s * length of the array is returned by reference into *numkeys. * * Along with the position, this command also returns the flags that are - * associated with how Redis will access the key. + * associated with how the server will access the key. * * 'cmd' must be point to the corresponding entry into the serverCommand * table, according to the command name in argv[0]. 
*/ @@ -2146,7 +2146,7 @@ int doesCommandHaveKeys(struct serverCommand *cmd) { (getAllKeySpecsFlags(cmd, 1) & CMD_KEY_NOT_KEY); /* has at least one key-spec not marked as NOT_KEY */ } -/* A simplified channel spec table that contains all of the redis commands +/* A simplified channel spec table that contains all of the commands * and which channels they have and how they are accessed. */ typedef struct ChannelSpecs { serverCommandProc *proc; /* Command procedure to match against */ @@ -2193,7 +2193,7 @@ int doesCommandHaveChannelsWithFlags(struct serverCommand *cmd, int flags) { * length of the array is returned by reference into *numkeys. * * Along with the position, this command also returns the flags that are - * associated with how Redis will access the channel. + * associated with how the server will access the channel. * * 'cmd' must be point to the corresponding entry into the serverCommand * table, according to the command name in argv[0]. */ diff --git a/src/debug.c b/src/debug.c index dd7cdffade..8a07518840 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1801,9 +1801,9 @@ void logRegisters(ucontext_t *uc) { #endif /* HAVE_BACKTRACE */ -/* Return a file descriptor to write directly to the Redis log with the +/* Return a file descriptor to write directly to the server log with the * write(2) syscall, that can be used in critical sections of the code - * where the rest of Redis can't be trusted (for example during the memory + * where the rest of the server can't be trusted (for example during the memory * test) or when an API call requires a raw fd. * * Close it with closeDirectLogFiledes(). */ @@ -2035,7 +2035,7 @@ void logModulesInfo(void) { } /* Log information about the "current" client, that is, the client that is - * currently being served by Redis. May be NULL if Redis is not serving a + * currently being served by the server. May be NULL if the server is not serving a * client right now. */ void logCurrentClient(client *cc, const char *title) { if (cc == NULL) return; @@ -2161,7 +2161,7 @@ static void killMainThread(void) { /* Kill the running threads (other than current) in an unclean way. This function * should be used only when it's critical to stop the threads for some reason. - * Currently Redis does this only on crash (for instance on SIGSEGV) in order + * Currently the server does this only on crash (for instance on SIGSEGV) in order * to perform a fast memory check without other threads messing with memory. */ void killThreads(void) { killMainThread(); @@ -2394,10 +2394,10 @@ void bugReportEnd(int killViaSignal, int sig) { serverLogRawFromHandler(LL_WARNING|LL_RAW, "\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n" " Please report the crash by opening an issue on github:\n\n" -" http://github.com/redis/redis/issues\n\n" -" If a Redis module was involved, please open in the module's repo instead.\n\n" -" Suspect RAM error? Use redis-server --test-memory to verify it.\n\n" -" Some other issues could be detected by redis-server --check-system\n" +" http://github.com/valkey-io/valkey/issues\n\n" +" If a module was involved, please open in the module's repo instead.\n\n" +" Suspect RAM error? Use valkey-server --test-memory to verify it.\n\n" +" Some other issues could be detected by valkey-server --check-system\n" ); /* free(messages); Don't call free() with possibly corrupted memory.
*/ diff --git a/src/dict.c b/src/dict.c index d2767939bc..dcde86845e 100644 --- a/src/dict.c +++ b/src/dict.c @@ -50,7 +50,7 @@ /* Using dictSetResizeEnabled() we make possible to disable * resizing and rehashing of the hash table as needed. This is very important - * for Redis, as we use copy-on-write and don't want to move too much memory + * for us, as we use copy-on-write and don't want to move too much memory * around when there is a child performing saving operations. * * Note that even when dict_can_resize is set to DICT_RESIZE_AVOID, not all diff --git a/src/endianconv.c b/src/endianconv.c index 9344c39ad6..212bdcb522 100644 --- a/src/endianconv.c +++ b/src/endianconv.c @@ -4,7 +4,7 @@ * defined into endianconv.h, this way we define everything is a non-operation * if the arch is already little endian. * - * Redis tries to encode everything as little endian (but a few things that need + * The server tries to encode everything as little endian (but a few things that need * to be backward compatible are still in big endian) because most of the * production environments are little endian, and we have a lot of conversions * in a few places because ziplists, intsets, zipmaps, need to be endian-neutral diff --git a/src/eval.c b/src/eval.c index 6542246803..587fb3b7af 100644 --- a/src/eval.c +++ b/src/eval.c @@ -80,7 +80,7 @@ dictType shaScriptObjectDictType = { /* Lua context */ struct luaCtx { lua_State *lua; /* The Lua interpreter. We use just one for all clients */ - client *lua_client; /* The "fake client" to query Redis from Lua */ + client *lua_client; /* The "fake client" to query the server from Lua */ dict *lua_scripts; /* A dictionary of SHA1 -> Lua scripts */ list *lua_scripts_lru_list; /* A list of SHA1, first in first out LRU eviction. */ unsigned long long lua_scripts_mem; /* Cached scripts' memory + oh */ @@ -94,7 +94,7 @@ struct ldbState { int active; /* Are we debugging EVAL right now? */ int forked; /* Is this a fork()ed debugging session? */ list *logs; /* List of messages to send to the client. */ - list *traces; /* Messages about Redis commands executed since last stop.*/ + list *traces; /* Messages about commands executed since last stop.*/ list *children; /* All forked debugging sessions pids. */ int bp[LDB_BREAKPOINTS_MAX]; /* An array of breakpoints line numbers. */ int bpcount; /* Number of valid entries inside bp. */ @@ -184,7 +184,7 @@ int luaRedisReplicateCommandsCommand(lua_State *lua) { * This function is called the first time at server startup with * the 'setup' argument set to 1. * - * It can be called again multiple times during the lifetime of the Redis + * It can be called again multiple times during the lifetime of the * process, with 'setup' set to 0, and following a scriptingRelease() call, * in order to reset the Lua scripting environment. * @@ -252,7 +252,7 @@ void scriptingInit(int setup) { lua_pcall(lua,0,0,0); } - /* Create the (non connected) client that we use to execute Redis commands + /* Create the (non connected) client that we use to execute server commands * inside the Lua interpreter. * Note: there is no need to create it again when this function is called * by scriptingReset(). */ @@ -285,7 +285,7 @@ void freeLuaScriptsSync(dict *lua_scripts, list *lua_scripts_lru_list, lua_State * using libc. libc may take a bit longer to return the memory to the OS, * so after lua_close, we call malloc_trim try to purge it earlier. * - * We do that only when Redis itself does not use libc. 
When Lua and Redis + * We do that only when the server itself does not use libc. When Lua and the server * use different allocators, one won't use the fragmentation holes of the * other, and released memory can take a long time until it is returned to * the OS. */ @@ -763,7 +763,7 @@ unsigned long evalScriptsMemory(void) { } /* --------------------------------------------------------------------------- - * LDB: Redis Lua debugging facilities + * LDB: Lua debugging facilities * ------------------------------------------------------------------------- */ /* Initialize Lua debugger data structures. */ @@ -860,7 +860,7 @@ void ldbSendLogs(void) { /* Start a debugging session before calling EVAL implementation. * The technique we use is to capture the client socket file descriptor, * in order to perform direct I/O with it from within Lua hooks. This - * way we don't have to re-enter Redis in order to handle I/O. + * way we don't have to re-enter the server in order to handle I/O. * * The function returns 1 if the caller should proceed to call EVAL, * and 0 if instead the caller should abort the operation (this happens @@ -1053,7 +1053,7 @@ sds *ldbReplParseCommand(int *argcp, char** err) { sds copy = sdsdup(ldb.cbuf); char *p = copy; - /* This Redis protocol parser is a joke... just the simplest thing that + /* This RESP parser is a joke... just the simplest thing that * works in this context. It is also very forgiving regarding broken * protocol. */ @@ -1244,7 +1244,7 @@ char *ldbRedisProtocolToHuman_Null(sds *o, char *reply); char *ldbRedisProtocolToHuman_Bool(sds *o, char *reply); char *ldbRedisProtocolToHuman_Double(sds *o, char *reply); -/* Get Redis protocol from 'reply' and appends it in human readable form to +/* Get RESP from 'reply' and appends it in human readable form to * the passed SDS string 'o'. * * Note that the SDS string is passed by reference (pointer of pointer to @@ -1267,7 +1267,7 @@ char *ldbRedisProtocolToHuman(sds *o, char *reply) { } /* The following functions are helpers for ldbRedisProtocolToHuman(), each - * take care of a given Redis return type. */ + * take care of a given RESP return type. */ char *ldbRedisProtocolToHuman_Int(sds *o, char *reply) { char *p = strchr(reply+1,'\r'); @@ -1372,7 +1372,7 @@ char *ldbRedisProtocolToHuman_Double(sds *o, char *reply) { return p+2; } -/* Log a Redis reply as debugger output, in a human readable format. +/* Log a RESP reply as debugger output, in a human readable format. * If the resulting string is longer than 'len' plus a few more chars * used as prefix, it gets truncated. */ void ldbLogRedisReply(char *reply) { @@ -1515,9 +1515,9 @@ void ldbEval(lua_State *lua, sds *argv, int argc) { lua_pop(lua,1); } -/* Implement the debugger "redis" command. We use a trick in order to make +/* Implement the debugger "server" command. We use a trick in order to make * the implementation very simple: we just call the Lua redis.call() command - * implementation, with ldb.step enabled, so as a side effect the Redis command + * implementation, with ldb.step enabled, so as a side effect the command * and its reply are logged. 
*/ void ldbRedis(lua_State *lua, sds *argv, int argc) { int j; diff --git a/src/evict.c b/src/evict.c index be8cfad75f..7b4937303c 100644 --- a/src/evict.c +++ b/src/evict.c @@ -102,7 +102,7 @@ unsigned long long estimateObjectIdleTime(robj *o) { /* LRU approximation algorithm * - * Redis uses an approximation of the LRU algorithm that runs in constant + * The server uses an approximation of the LRU algorithm that runs in constant * memory. Every time there is a key to expire, we sample N keys (with * N very small, usually in around 5) to populate a pool of best keys to * evict of M keys (the pool size is defined by EVPOOL_SIZE). @@ -436,7 +436,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev } /* Return 1 if used memory is more than maxmemory after allocating more memory, - * return 0 if not. Redis may reject user's requests or evict some keys if used + * return 0 if not. The server may reject user's requests or evict some keys if used * memory exceeds maxmemory, especially, when we allocate huge memory at once. */ int overMaxmemoryAfterAlloc(size_t moremem) { if (!server.maxmemory) return 0; /* No limit. */ @@ -517,10 +517,10 @@ static unsigned long evictionTimeLimitUs(void) { /* Check that memory usage is within the current "maxmemory" limit. If over * "maxmemory", attempt to free memory by evicting data (if it's safe to do so). * - * It's possible for Redis to suddenly be significantly over the "maxmemory" + * It's possible for the server to suddenly be significantly over the "maxmemory" * setting. This can happen if there is a large allocation (like a hash table * resize) or even if the "maxmemory" setting is manually adjusted. Because of - * this, it's important to evict for a managed period of time - otherwise Redis + * this, it's important to evict for a managed period of time - otherwise the server * would become unresponsive while evicting. * * The goal of this function is to improve the memory situation - not to diff --git a/src/expire.c b/src/expire.c index 4f0868d715..97b59a7871 100644 --- a/src/expire.c +++ b/src/expire.c @@ -46,7 +46,7 @@ static double avg_ttl_factor[16] = {0.98, 0.9604, 0.941192, 0.922368, 0.903921, /* Helper function for the activeExpireCycle() function. * This function will try to expire the key that is stored in the hash table - * entry 'de' of the 'expires' hash table of a Redis database. + * entry 'de' of the 'expires' hash table of a database. * * If the key is found to be expired, it is removed from the database and * 1 is returned. Otherwise no operation is performed and 0 is returned. @@ -259,7 +259,7 @@ void activeExpireCycle(int type) { /* Continue to expire if at the end of the cycle there are still * a big percentage of keys to expire, compared to the number of keys * we scanned. The percentage, stored in config_cycle_acceptable_stale - * is not fixed, but depends on the Redis configured "expire effort". */ + * is not fixed, but depends on the configured "expire effort". 
*/ do { unsigned long num; iteration++; diff --git a/src/functions.c b/src/functions.c index bee5ce232b..b914288551 100644 --- a/src/functions.c +++ b/src/functions.c @@ -404,7 +404,7 @@ static int libraryJoin(functionsLibCtx *functions_lib_ctx_dst, functionsLibCtx * /* Register an engine, should be called once by the engine on startup and give the following: * * - engine_name - name of the engine to register - * - engine_ctx - the engine ctx that should be used by Redis to interact with the engine */ + * - engine_ctx - the engine ctx that should be used by the server to interact with the engine */ int functionsRegisterEngine(const char *engine_name, engine *engine) { sds engine_name_sds = sdsnew(engine_name); if (dictFetchValue(engines, engine_name_sds)) { diff --git a/src/functions.h b/src/functions.h index 5228560859..54712d0daf 100644 --- a/src/functions.h +++ b/src/functions.h @@ -31,7 +31,7 @@ #define __FUNCTIONS_H_ /* - * functions.c unit provides the Redis Functions API: + * functions.c unit provides the Functions API: * * FUNCTION LOAD * * FUNCTION LIST * * FUNCTION CALL (FCALL and FCALL_RO) @@ -68,7 +68,7 @@ typedef struct engine { int (*create)(void *engine_ctx, functionLibInfo *li, sds code, size_t timeout, sds *err); /* Invoking a function, r_ctx is an opaque object (from engine POV). - * The r_ctx should be used by the engine to interaction with Redis, + * The r_ctx should be used by the engine to interaction with the server, * such interaction could be running commands, set resp, or set * replication mode */ diff --git a/src/geo.c b/src/geo.c index 1004193e03..19266dce92 100644 --- a/src/geo.c +++ b/src/geo.c @@ -34,7 +34,7 @@ #include "pqsort.h" /* Things exported from t_zset.c only for geo.c, since it is the only other - * part of Redis that requires close zset introspection. */ + * part of the server that requires close zset introspection. */ unsigned char *zzlFirstInRange(unsigned char *zl, zrangespec *range); int zslValueLteMax(double value, zrangespec *spec); @@ -246,7 +246,7 @@ int geoWithinShape(GeoShape *shape, double score, double *xy, double *distance) return C_OK; } -/* Query a Redis sorted set to extract all the elements between 'min' and +/* Query a sorted set to extract all the elements between 'min' and * 'max', appending them into the array of geoPoint structures 'geoArray'. * The command returns the number of elements added to the array. * diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 4f6f1eb454..beb5b179ac 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -1,5 +1,5 @@ -/* hyperloglog.c - Redis HyperLogLog probabilistic cardinality approximation. - * This file implements the algorithm and the exported Redis commands. +/* hyperloglog.c - HyperLogLog probabilistic cardinality approximation. + * This file implements the algorithm and the exported commands. * * Copyright (c) 2014, Salvatore Sanfilippo * All rights reserved. @@ -34,14 +34,14 @@ #include #include -/* The Redis HyperLogLog implementation is based on the following ideas: +/* The HyperLogLog implementation is based on the following ideas: * * * The use of a 64 bit hash function as proposed in [1], in order to estimate * cardinalities larger than 10^9, at the cost of just 1 additional bit per * register. * * The use of 16384 6-bit registers for a great level of accuracy, using * a total of 12k per key. - * * The use of the Redis string data type. No new type is introduced. + * * The use of the string data type. No new type is introduced. 
* * No attempt is made to compress the data structure as in [1]. Also the * algorithm used is the original HyperLogLog Algorithm as in [2], with * the only difference that a 64 bit hash function is used, so no correction @@ -53,7 +53,7 @@ * [2] P. Flajolet, Éric Fusy, O. Gandouet, and F. Meunier. Hyperloglog: The * analysis of a near-optimal cardinality estimation algorithm. * - * Redis uses two representations: + * We use two representations: * * 1) A "dense" representation where every entry is represented by * a 6-bit integer. @@ -88,7 +88,7 @@ * Dense representation * === * - * The dense representation used by Redis is the following: + * The dense representation is the following: * * +--------+--------+--------+------// //--+ * |11000000|22221111|33333322|55444444 .... | @@ -391,7 +391,7 @@ static char *invalid_hll_err = "-INVALIDOBJ Corrupted HLL object detected"; /* ========================= HyperLogLog algorithm ========================= */ /* Our hash function is MurmurHash2, 64 bit version. - * It was modified for Redis in order to provide the same result in + * It was modified in order to provide the same result in * big and little endian archs (endian neutral). */ REDIS_NO_SANITIZE("alignment") uint64_t MurmurHash64A (const void * key, int len, unsigned int seed) { @@ -520,7 +520,7 @@ int hllDenseAdd(uint8_t *registers, unsigned char *ele, size_t elesize) { void hllDenseRegHisto(uint8_t *registers, int* reghisto) { int j; - /* Redis default is to use 16384 registers 6 bits each. The code works + /* Default is to use 16384 registers 6 bits each. The code works * with other values by modifying the defines, but for our target value * we take a faster path with unrolled loops. */ if (HLL_REGISTERS == 16384 && HLL_BITS == 6) { @@ -1271,7 +1271,7 @@ void pfcountCommand(client *c) { * The user specified a single key. Either return the cached value * or compute one and update the cache. * - * Since a HLL is a regular Redis string type value, updating the cache does + * Since a HLL is a regular string type value, updating the cache does * modify the value. We do a lookupKeyRead anyway since this is flagged as a * read-only command. The difference is that with lookupKeyWrite, a * logically expired key on a replica is deleted, while with lookupKeyRead @@ -1315,7 +1315,7 @@ void pfcountCommand(client *c) { hdr->card[6] = (card >> 48) & 0xff; hdr->card[7] = (card >> 56) & 0xff; /* This is considered a read-only command even if the cached value - * may be modified and given that the HLL is a Redis string + * may be modified and given that the HLL is a string * we need to propagate the change. */ signalModifiedKey(c,c->db,c->argv[1]); server.dirty++; diff --git a/src/kvstore.c b/src/kvstore.c index acfe62bb85..b3b166a0aa 100644 --- a/src/kvstore.c +++ b/src/kvstore.c @@ -4,7 +4,7 @@ * The purpose of this KV store is to have easy access to all keys that belong * in the same dict (i.e. are in the same dict-index) * - * For example, when Redis is running in cluster mode, we use kvstore to save + * For example, when the server is running in cluster mode, we use kvstore to save * all keys that map to the same hash-slot in a separate dict within the kvstore * struct. * This enables us to easily access all keys that map to a specific hash-slot. diff --git a/src/latency.c b/src/latency.c index b5ec6e3823..d900011c8f 100644 --- a/src/latency.c +++ b/src/latency.c @@ -1,5 +1,5 @@ /* The latency monitor allows to easily observe the sources of latency - * in a Redis instance using the LATENCY command. 
Different latency + * in an instance using the LATENCY command. Different latency * sources are monitored, like disk I/O, execution of commands, fork * system call, and so forth. * @@ -198,7 +198,7 @@ void analyzeLatencyForEvent(char *event, struct latencyStats *ls) { if (ls->samples) ls->mad = sum / ls->samples; } -/* Create a human readable report of latency events for this Redis instance. */ +/* Create a human readable report of latency events for this instance. */ sds createLatencyReport(void) { sds report = sdsempty(); int advise_better_vm = 0; /* Better virtual machines. */ diff --git a/src/lazyfree.c b/src/lazyfree.c index 4733d8f99a..22fa2dc863 100644 --- a/src/lazyfree.c +++ b/src/lazyfree.c @@ -172,7 +172,7 @@ void freeObjAsync(robj *key, robj *obj, int dbid) { size_t free_effort = lazyfreeGetFreeEffort(key,obj,dbid); /* Note that if the object is shared, to reclaim it now it is not * possible. This rarely happens, however sometimes the implementation - * of parts of the Redis core may call incrRefCount() to protect + * of parts of the server core may call incrRefCount() to protect * objects, and then call dbDelete(). */ if (free_effort > LAZYFREE_THRESHOLD && obj->refcount == 1) { atomicIncr(lazyfree_objects,1); @@ -182,7 +182,7 @@ void freeObjAsync(robj *key, robj *obj, int dbid) { } } -/* Empty a Redis DB asynchronously. What the function does actually is to +/* Empty a DB asynchronously. What the function does actually is to * create a new empty set of hash tables and scheduling the old ones for * lazy freeing. */ void emptyDbAsync(serverDb *db) { diff --git a/src/listpack.c b/src/listpack.c index 27becc9faa..8816a663cb 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -165,7 +165,7 @@ int lpSafeToAdd(unsigned char* lp, size_t add) { * * ----------------------------------------------------------------------------- * - * Credits: this function was adapted from the Redis source code, file + * Credits: this function was adapted from the Redis OSS source code, file * "utils.c", function string2ll(), and is copyright: * * Copyright(C) 2011, Pieter Noordhuis diff --git a/src/localtime.c b/src/localtime.c index 1cefdfa88c..da2d6c9d09 100644 --- a/src/localtime.c +++ b/src/localtime.c @@ -31,9 +31,9 @@ /* This is a safe version of localtime() which contains no locks and is * fork() friendly. Even the _r version of localtime() cannot be used safely - * in Redis. Another thread may be calling localtime() while the main thread + * in the server. Another thread may be calling localtime() while the main thread * forks(). Later when the child process calls localtime() again, for instance - * in order to log something to the Redis log, it may deadlock: in the copy + * in order to log something to the server log, it may deadlock: in the copy * of the address space of the forked process the lock will never be released. * * This function takes the timezone 'tz' as argument, and the 'dst' flag is @@ -47,7 +47,7 @@ * should be refreshed at safe times. * * Note that this function does not work for dates < 1/1/1970, it is solely - * designed to work with what time(NULL) may return, and to support Redis + * designed to work with what time(NULL) may return, and to support server * logging of the dates, it's not really a complete implementation. */ static int is_leap_year(time_t year) { if (year % 4) return 0; /* A year not divisible by 4 is not leap. 
*/ diff --git a/src/logreqres.c b/src/logreqres.c index 6e7621d35d..bd19483e48 100644 --- a/src/logreqres.c +++ b/src/logreqres.c @@ -143,7 +143,7 @@ void reqresReset(client *c, int free_buf) { * Ideally, we would just have this code inside reqresAppendRequest, which is called * from processCommand, but we cannot save the reply offset inside processCommand * because of the following pipe-lining scenario: - * set rd [redis_deferring_client] + * set rd [valkey_deferring_client] * set buf "" * append buf "SET key vale\r\n" * append buf "BLPOP mylist 0\r\n" diff --git a/src/lolwut.c b/src/lolwut.c index eca98cf18e..268353898d 100644 --- a/src/lolwut.c +++ b/src/lolwut.c @@ -30,7 +30,7 @@ * * This file implements the LOLWUT command. The command should do something * fun and interesting, and should be replaced by a new implementation at - * each new version of Redis. + * each new version of the server. */ #include "server.h" @@ -41,7 +41,7 @@ void lolwut5Command(client *c); void lolwut6Command(client *c); /* The default target for LOLWUT if no matching version was found. - * This is what unstable versions of Redis will display. */ + * This is what unstable versions of the server will display. */ void lolwutUnstableCommand(client *c) { sds rendered = sdsnew("Redis ver. "); rendered = sdscat(rendered,VALKEY_VERSION); diff --git a/src/lolwut5.c b/src/lolwut5.c index d293e77d97..b7d4c77d71 100644 --- a/src/lolwut5.c +++ b/src/lolwut5.c @@ -30,7 +30,7 @@ * * This file implements the LOLWUT command. The command should do something * fun and interesting, and should be replaced by a new implementation at - * each new version of Redis. + * each new version of the server. */ #include "server.h" @@ -61,7 +61,7 @@ void lwTranslatePixelsGroup(int byte, char *output) { output[2] = 0x80 | (code & 0x3F); /* 10-xxxxxx */ } -/* Schotter, the output of LOLWUT of Redis 5, is a computer graphic art piece +/* Schotter, the output of LOLWUT of Redis OSS 5, is a computer graphic art piece * generated by Georg Nees in the 60s. It explores the relationship between * caos and order. * diff --git a/src/lolwut6.c b/src/lolwut6.c index 440dba9660..4e59c68703 100644 --- a/src/lolwut6.c +++ b/src/lolwut6.c @@ -30,7 +30,7 @@ * * This file implements the LOLWUT command. The command should do something * fun and interesting, and should be replaced by a new implementation at - * each new version of Redis. + * each new version of the server. * * Thanks to Michele Hiki Falcone for the original image that inspired * the image, part of his game, Plaguemon. diff --git a/src/module.c b/src/module.c index ec8e605d54..d43feba28f 100644 --- a/src/module.c +++ b/src/module.c @@ -31,7 +31,7 @@ * Modules API documentation information * * The comments in this file are used to generate the API documentation on the - * Redis website. + * website. * * Each function starting with VM_ and preceded by a block comment is included * in the API documentation. To hide a VM_ function, put a blank line between @@ -69,7 +69,7 @@ /* -------------------------------------------------------------------------- * Private data structures used by the modules system. 
Those are data - * structures that are never exposed to Redis Modules, if not as void + * structures that are never exposed to Modules, if not as void * pointers that have an API the module can call with them) * -------------------------------------------------------------------------- */ @@ -109,7 +109,7 @@ struct AutoMemEntry { #define VALKEYMODULE_AM_DICT 4 #define VALKEYMODULE_AM_INFO 5 -/* The pool allocator block. Redis Modules can allocate memory via this special +/* The pool allocator block. Modules can allocate memory via this special * allocator that will automatically release it all once the callback returns. * This means that it can only be used for ephemeral allocations. However * there are two advantages for modules to use this API: @@ -132,7 +132,7 @@ typedef struct ValkeyModulePoolAllocBlock { char memory[]; } ValkeyModulePoolAllocBlock; -/* This structure represents the context in which Redis modules operate. +/* This structure represents the context in which modules operate. * Most APIs module can access, get a pointer to the context, so that the API * implementation can hold state across calls, or remember what to free after * the call and so forth. @@ -187,7 +187,7 @@ typedef struct ValkeyModuleCtx ValkeyModuleCtx; #define VALKEYMODULE_CTX_COMMAND (1<<9) /* Context created to serve a command from call() or AOF (which calls cmd->proc directly) */ -/* This represents a Redis key opened with VM_OpenKey(). */ +/* This represents a key opened with VM_OpenKey(). */ struct ValkeyModuleKey { ValkeyModuleCtx *ctx; serverDb *db; @@ -229,7 +229,7 @@ struct ValkeyModuleKey { #define VALKEYMODULE_ZSET_RANGE_POS 3 /* Function pointer type of a function representing a command inside - * a Redis module. */ + * a module. */ struct ValkeyModuleBlockedClient; typedef int (*ValkeyModuleCmdFunc) (ValkeyModuleCtx *ctx, void **argv, int argc); typedef int (*ValkeyModuleAuthCallback)(ValkeyModuleCtx *ctx, void *username, void *password, ValkeyModuleString **err); @@ -399,15 +399,15 @@ typedef struct ValkeyModuleServerInfoData { #define VALKEYMODULE_ARGV_DRY_RUN (1<<10) #define VALKEYMODULE_ARGV_ALLOW_BLOCK (1<<11) -/* Determine whether Redis should signalModifiedKey implicitly. +/* Determine whether the server should signalModifiedKey implicitly. * In case 'ctx' has no 'module' member (and therefore no module->options), - * we assume default behavior, that is, Redis signals. + * we assume default behavior, that is, the server signals. * (see VM_GetThreadSafeContext) */ #define SHOULD_SIGNAL_MODIFIED_KEYS(ctx) \ ((ctx)->module? !((ctx)->module->options & VALKEYMODULE_OPTION_NO_IMPLICIT_SIGNAL_MODIFIED) : 1) /* Server events hooks data structures and defines: this modules API - * allow modules to subscribe to certain events in Redis, such as + * allow modules to subscribe to certain events in the server, such as * the start and end of an RDB or AOF save, the change of role in replication, * and similar other events. */ @@ -419,13 +419,13 @@ typedef struct ValkeyModuleEventListener { list *ValkeyModule_EventListeners; /* Global list of all the active events. */ -/* Data structures related to the redis module users */ +/* Data structures related to the module users */ /* This is the object returned by VM_CreateModuleUser(). The module API is * able to create users, set ACLs to such users, and later authenticate * clients using such newly created users. 
*/ typedef struct ValkeyModuleUser { - user *user; /* Reference to the real redis user */ + user *user; /* Reference to the real user */ int free_user; /* Indicates that user should also be freed when this object is freed */ } ValkeyModuleUser; @@ -439,7 +439,7 @@ typedef struct ValkeyModuleKeyOptCtx { as `copy2`, 'from_dbid' and 'to_dbid' are both valid. */ } ValkeyModuleKeyOptCtx; -/* Data structures related to redis module configurations */ +/* Data structures related to module configurations */ /* The function signatures for module config get callbacks. These are identical to the ones exposed in valkeymodule.h. */ typedef ValkeyModuleString * (*ValkeyModuleConfigGetStringFunc)(const char *name, void *privdata); typedef long long (*ValkeyModuleConfigGetNumericFunc)(const char *name, void *privdata); @@ -513,13 +513,13 @@ int moduleVerifyResourceName(const char *name); /* -------------------------------------------------------------------------- * ## Heap allocation raw functions * - * Memory allocated with these functions are taken into account by Redis key - * eviction algorithms and are reported in Redis memory usage information. + * Memory allocated with these functions are taken into account by key + * eviction algorithms and are reported in memory usage information. * -------------------------------------------------------------------------- */ /* Use like malloc(). Memory allocated with this function is reported in - * Redis INFO memory, used for keys eviction according to maxmemory settings - * and in general is taken into account as memory allocated by Redis. + * INFO memory, used for keys eviction according to maxmemory settings + * and in general is taken into account as memory allocated by the server. * You should avoid using malloc(). * This function panics if unable to allocate enough memory. */ void *VM_Alloc(size_t bytes) { @@ -540,8 +540,8 @@ void *VM_TryAlloc(size_t bytes) { } /* Use like calloc(). Memory allocated with this function is reported in - * Redis INFO memory, used for keys eviction according to maxmemory settings - * and in general is taken into account as memory allocated by Redis. + * INFO memory, used for keys eviction according to maxmemory settings + * and in general is taken into account as memory allocated by the server. * You should avoid using calloc() directly. */ void *VM_Calloc(size_t nmemb, size_t size) { return zcalloc_usable(nmemb*size,NULL); @@ -848,7 +848,7 @@ void moduleFreeContext(ValkeyModuleCtx *ctx) { } static CallReply *moduleParseReply(client *c, ValkeyModuleCtx *ctx) { - /* Convert the result of the Redis command into a module reply. */ + /* Convert the result of the command into a module reply. */ sds proto = sdsnewlen(c->buf,c->bufpos); c->bufpos = 0; while(listLength(c->reply)) { @@ -923,7 +923,7 @@ void moduleCreateContext(ValkeyModuleCtx *out_ctx, ValkeyModule *module, int ctx } } -/* This Redis command binds the normal Redis command invocation with commands +/* This command binds the normal command invocation with commands * exported by modules. */ void ValkeyModuleCommandDispatcher(client *c) { ValkeyModuleCommand *cp = c->cmd->module_cmd; @@ -940,7 +940,7 @@ void ValkeyModuleCommandDispatcher(client *c) { * the client argument vectors: sometimes this will result in the SDS * string having unused space at the end. Later if a module takes ownership * of the RedisString, such space will be wasted forever. 
Inside the - * Redis core this is not a problem because tryObjectEncoding() is called + * server core this is not a problem because tryObjectEncoding() is called * before storing strings in the key space. Here we need to do it * for the module. */ for (int i = 0; i < c->argc; i++) { @@ -999,7 +999,7 @@ int moduleGetCommandChannelsViaAPI(struct serverCommand *cmd, robj **argv, int a /* -------------------------------------------------------------------------- * ## Commands API * - * These functions are used to implement custom Redis commands. + * These functions are used to implement custom commands. * * For examples, see https://redis.io/topics/modules-intro. * -------------------------------------------------------------------------- */ @@ -1076,8 +1076,8 @@ int VM_IsChannelsPositionRequest(ValkeyModuleCtx *ctx) { * * VALKEYMODULE_CMD_CHANNEL_PUBLISH: This command will publish to this channel. * * VALKEYMODULE_CMD_CHANNEL_PATTERN: Instead of acting on a specific channel, will act on any * channel specified by the pattern. This is the same access - * used by the PSUBSCRIBE and PUNSUBSCRIBE commands available - * in Redis. Not intended to be used with PUBLISH permissions. + * used by the PSUBSCRIBE and PUNSUBSCRIBE commands. + * Not intended to be used with PUBLISH permissions. * * The following is an example of how it could be used: * @@ -1134,7 +1134,7 @@ int isCommandNameValid(const char *name) { } /* Helper for VM_CreateCommand(). Turns a string representing command - * flags into the command flags used by the Redis core. + * flags into the command flags used by the server core. * * It returns the set of flags, or -1 if unknown flags are found. */ int64_t commandFlagsFromString(char *s) { @@ -1172,7 +1172,7 @@ int64_t commandFlagsFromString(char *s) { ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, sds declared_name, sds fullname, ValkeyModuleCmdFunc cmdfunc, int64_t flags, int firstkey, int lastkey, int keystep); -/* Register a new command in the Redis server, that will be handled by +/* Register a new command in the server, that will be handled by * calling the function pointer 'cmdfunc' using the ValkeyModule calling * convention. * @@ -1213,7 +1213,7 @@ ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, sds d * * **"pubsub"**: The command publishes things on Pub/Sub channels. * * **"random"**: The command may have different outputs even starting * from the same input arguments and key values. - * Starting from Redis 7.0 this flag has been deprecated. + * Starting from Redis OSS 7.0 this flag has been deprecated. * Declaring a command as "random" can be done using * command tips, see https://redis.io/topics/command-tips. * * **"allow-stale"**: The command is allowed to run on slaves that don't @@ -1230,7 +1230,7 @@ ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, sds d * * **"getkeys-api"**: The command implements the interface to return * the arguments that are keys. Used when start/stop/step * is not enough because of the command syntax. - * * **"no-cluster"**: The command should not register in Redis Cluster + * * **"no-cluster"**: The command should not register in Cluster * since is not designed to work with it because, for * example, is unable to report the position of the * keys, programmatically creates key names, or any @@ -1249,7 +1249,7 @@ ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, sds d * the arguments that are channels. 
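As a point of reference for the registration machinery documented in this hunk, here is a minimal sketch of a module that registers one command from its OnLoad entry point. It assumes the public wrappers in valkeymodule.h mirror the VM_ internals above: ValkeyModule_Init, ValkeyModule_CreateCommand, ValkeyModule_WrongArity, ValkeyModule_StringPtrLen, ValkeyModule_ReplyWithLongLong and VALKEYMODULE_APIVER_1 are assumed names, and the "readonly fast" flag string is assumed to be accepted by the flag parser. The trailing 1,1,1 is the firstkey/lastkey/keystep triplet discussed just below.

    #include "valkeymodule.h"

    /* HELLO.LEN key -- toy command: replies with the byte length of its key-name argument. */
    static int HelloLen_Command(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
        if (argc != 2) return ValkeyModule_WrongArity(ctx);
        size_t len;
        ValkeyModule_StringPtrLen(argv[1], &len);
        return ValkeyModule_ReplyWithLongLong(ctx, (long long)len);
    }

    int ValkeyModule_OnLoad(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
        (void)argv; (void)argc;
        if (ValkeyModule_Init(ctx, "hello", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR)
            return VALKEYMODULE_ERR;
        /* "readonly fast" come from the command flag list; 1,1,1 marks argv[1]
         * as the only key position. */
        if (ValkeyModule_CreateCommand(ctx, "hello.len", HelloLen_Command,
                                       "readonly fast", 1, 1, 1) == VALKEYMODULE_ERR)
            return VALKEYMODULE_ERR;
        return VALKEYMODULE_OK;
    }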
* * The last three parameters specify which arguments of the new command are - * Redis keys. See https://redis.io/commands/command for more information. + * keys. See https://valkey.io/commands/command for more information. * * * `firstkey`: One-based index of the first argument that's a key. * Position 0 is always the command name itself. @@ -1267,7 +1267,7 @@ ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, sds d * only be used to find keys that exist at constant indices. * For non-trivial key arguments, you may pass 0,0,0 and use * ValkeyModule_SetCommandInfo to set key specs using a more advanced scheme and use - * ValkeyModule_SetCommandACLCategories to set Redis ACL categories of the commands. */ + * ValkeyModule_SetCommandACLCategories to set ACL categories of the commands. */ int VM_CreateCommand(ValkeyModuleCtx *ctx, const char *name, ValkeyModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep) { if (!ctx->module->onload) return VALKEYMODULE_ERR; @@ -1307,7 +1307,7 @@ ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, sds d /* Create a command "proxy", which is a structure that is referenced * in the command table, so that the generic command that works as - * binding between modules and Redis, can know what function to call + * binding between modules and the server, can know what function to call * and what the module is. */ cp = zcalloc(sizeof(*cp)); cp->module = module; @@ -1518,7 +1518,7 @@ int matchAclCategoryFlag(char *flag, int64_t *acl_categories_flags) { } /* Helper for VM_SetCommandACLCategories(). Turns a string representing acl category - * flags into the acl category flags used by Redis ACL which allows users to access + * flags into the acl category flags used by the server ACL which allows users to access * the module commands by acl categories. * * It returns the set of acl flags, or -1 if unknown flags are found. */ @@ -1584,7 +1584,7 @@ int VM_SetCommandACLCategories(ValkeyModuleCommand *command, const char *aclflag * * All fields except `version` are optional. Explanation of the fields: * - * - `version`: This field enables compatibility with different Redis versions. + * - `version`: This field enables compatibility with different server versions. * Always set this field to VALKEYMODULE_COMMAND_INFO_VERSION. * * - `summary`: A short description of the command (optional). @@ -1592,7 +1592,7 @@ int VM_SetCommandACLCategories(ValkeyModuleCommand *command, const char *aclflag * - `complexity`: Complexity description (optional). * * - `since`: The version where the command was introduced (optional). - * Note: The version specified should be the module's, not Redis version. + * Note: The version specified should be the module's, not the server version. * * - `history`: An array of ValkeyModuleCommandHistoryEntry (optional), which is * a struct with the following fields: @@ -1609,7 +1609,7 @@ int VM_SetCommandACLCategories(ValkeyModuleCommand *command, const char *aclflag * * - `arity`: Number of arguments, including the command name itself. A positive * number specifies an exact number of arguments and a negative number - * specifies a minimum number of arguments, so use -N to say >= N. Redis + * specifies a minimum number of arguments, so use -N to say >= N. The server * validates a call before passing it to a module, so this can replace an * arity check inside the module command implementation. 
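A hedged sketch of how the metadata fields listed above might be filled in for the toy command shown earlier. ValkeyModule_SetCommandInfo and VALKEYMODULE_COMMAND_INFO_VERSION are named in this hunk; the ValkeyModuleCommandInfo struct shape and ValkeyModule_GetCommand are assumed to mirror the familiar RedisModule API, and only the version, summary, complexity, since and arity fields are used.

    #include "valkeymodule.h"

    /* Attach metadata to a command created earlier with ValkeyModule_CreateCommand(). */
    static int set_hello_len_info(ValkeyModuleCtx *ctx) {
        ValkeyModuleCommand *cmd = ValkeyModule_GetCommand(ctx, "hello.len");
        if (cmd == NULL) return VALKEYMODULE_ERR;

        ValkeyModuleCommandInfo info = {
            .version = VALKEYMODULE_COMMAND_INFO_VERSION, /* always set, as noted above */
            .summary = "Reply with the byte length of the key name",
            .complexity = "O(1)",
            .since = "1.0.0",   /* the module's version, not the server's */
            .arity = 2,         /* exact arity: command name plus one key */
        };
        return ValkeyModule_SetCommandInfo(cmd, &info);
    }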
A value of 0 (or an * omitted arity field) is equivalent to -2 if the command has sub commands @@ -1626,7 +1626,7 @@ int VM_SetCommandACLCategories(ValkeyModuleCommand *command, const char *aclflag * * Key-specs cause the triplet (firstkey, lastkey, keystep) given in * VM_CreateCommand to be recomputed, but it is still useful to provide - * these three parameters in VM_CreateCommand, to better support old Redis + * these three parameters in VM_CreateCommand, to better support old server * versions where VM_SetCommandInfo is not available. * * Note that key-specs don't fully replace the "getkeys-api" (see @@ -2283,7 +2283,7 @@ void moduleListFree(void *config) { void VM_SetModuleAttribs(ValkeyModuleCtx *ctx, const char *name, int ver, int apiver) { /* Called by VM_Init() to setup the `ctx->module` structure. * - * This is an internal function, Redis modules developers don't need + * This is an internal function, module developers don't need * to use it. */ ValkeyModule *module; @@ -2376,21 +2376,21 @@ int VM_BlockedClientMeasureTimeEnd(ValkeyModuleBlockedClient *bc) { return VALKEYMODULE_OK; } -/* This API allows modules to let Redis process background tasks, and some +/* This API allows modules to let the server process background tasks, and some * commands during long blocking execution of a module command. * The module can call this API periodically. * The flags is a bit mask of these: * * - `VALKEYMODULE_YIELD_FLAG_NONE`: No special flags, can perform some background * operations, but not process client commands. - * - `VALKEYMODULE_YIELD_FLAG_CLIENTS`: Redis can also process client commands. + * - `VALKEYMODULE_YIELD_FLAG_CLIENTS`: The server can also process client commands. * * The `busy_reply` argument is optional, and can be used to control the verbose * error string after the `-BUSY` error code. * - * When the `VALKEYMODULE_YIELD_FLAG_CLIENTS` is used, Redis will only start + * When the `VALKEYMODULE_YIELD_FLAG_CLIENTS` is used, the server will only start * processing client commands after the time defined by the - * `busy-reply-threshold` config, in which case Redis will start rejecting most + * `busy-reply-threshold` config, in which case the server will start rejecting most * commands with `-BUSY` error, but allow the ones marked with the `allow-busy` * flag to be executed. * This API can also be used in thread safe context (while locked), and during @@ -2407,10 +2407,10 @@ void VM_Yield(ValkeyModuleCtx *ctx, int flags, const char *busy_reply) { long long now = getMonotonicUs(); if (now >= ctx->next_yield_time) { /* In loading mode, there's no need to handle busy_module_yield_reply, - * and busy_module_yield_flags, since redis is anyway rejecting all + * and busy_module_yield_flags, since the server is anyway rejecting all * commands with -LOADING. 
*/ if (server.loading) { - /* Let redis process events */ + /* Let the server process events */ processEventsWhileBlocked(); } else { const char *prev_busy_module_yield_reply = server.busy_module_yield_reply; @@ -2425,7 +2425,7 @@ void VM_Yield(ValkeyModuleCtx *ctx, int flags, const char *busy_reply) { if (flags & VALKEYMODULE_YIELD_FLAG_CLIENTS) server.busy_module_yield_flags |= BUSY_MODULE_YIELD_CLIENTS; - /* Let redis process events */ + /* Let the server process events */ if (!pthread_equal(server.main_thread_id, pthread_self())) { /* If we are not in the main thread, we defer event loop processing to the main thread * after the main thread enters acquiring GIL state in order to protect the event @@ -2482,11 +2482,11 @@ void VM_Yield(ValkeyModuleCtx *ctx, int flags, const char *busy_reply) { * * VALKEYMODULE_OPTIONS_HANDLE_REPL_ASYNC_LOAD: * Setting this flag indicates module awareness of diskless async replication (repl-diskless-load=swapdb) - * and that redis could be serving reads during replication instead of blocking with LOADING status. + * and that the server could be serving reads during replication instead of blocking with LOADING status. * * VALKEYMODULE_OPTIONS_ALLOW_NESTED_KEYSPACE_NOTIFICATIONS: * Declare that the module wants to get nested key-space notifications. - * By default, Redis will not fire key-space notifications that happened inside + * By default, the server will not fire key-space notifications that happened inside * a key-space notification callback. This flag allows to change this behavior * and fire nested key-space notifications. Notice: if enabled, the module * should protected itself from infinite recursion. */ @@ -2516,7 +2516,7 @@ int VM_SignalModifiedKey(ValkeyModuleCtx *ctx, ValkeyModuleString *keyname) { * that wants to use automatic memory. * * When enabled, automatic memory management tracks and automatically frees - * keys, call replies and Redis string objects once the command returns. In most + * keys, call replies and RedisModuleString objects once the command returns. In most * cases this eliminates the need of calling the following functions: * * 1. ValkeyModule_CloseKey() @@ -2725,7 +2725,7 @@ ValkeyModuleString *VM_CreateStringFromStreamID(ValkeyModuleCtx *ctx, const Valk return o; } -/* Free a module string object obtained with one of the Redis modules API calls +/* Free a module string object obtained with one of the module API calls * that return new string objects. * * It is possible to call this function even when automatic memory management @@ -2984,11 +2984,11 @@ int VM_StringAppendBuffer(ValkeyModuleCtx *ctx, ValkeyModuleString *str, const c * string in a module command before the string is potentially available * to other threads is generally safe. * - * Currently, Redis may also automatically trim retained strings when a + * Currently, the server may also automatically trim retained strings when a * module command returns. However, doing this explicitly should still be * a preferred option: * - * 1. Future versions of Redis may abandon auto-trimming. + * 1. Future versions of the server may abandon auto-trimming. * 2. Auto-trimming as currently implemented is *not thread safe*. * A background thread manipulating a recently retained string may end up * in a race condition with the auto-trim, which could result with @@ -3452,7 +3452,7 @@ int VM_ReplyWithBool(ValkeyModuleCtx *ctx, int b) { return VALKEYMODULE_OK; } -/* Reply exactly what a Redis command returned us with ValkeyModule_Call(). 
+/* Reply exactly what a command returned us with ValkeyModule_Call(). * This function is useful when we use ValkeyModule_Call() in order to * execute some command, as we want to reply to the client exactly the * same reply we obtained by the command. @@ -3565,8 +3565,8 @@ int VM_ReplyWithLongDouble(ValkeyModuleCtx *ctx, long double ld) { * #### Note about calling this function from a thread safe context: * * Normally when you call this function from the callback implementing a - * module command, or any other callback provided by the Redis Module API, - * Redis will accumulate all the calls to this function in the context of + * module command, or any other callback provided by the Module API, + * The server will accumulate all the calls to this function in the context of * the callback, and will propagate all the commands wrapped in a MULTI/EXEC * transaction. However when calling this function from a threaded safe context * that can live an undefined amount of time, and can be locked/unlocked in @@ -3838,7 +3838,7 @@ int VM_GetSelectedDb(ValkeyModuleCtx *ctx) { /* Return the current context's flags. The flags provide information on the * current request context (whether the client is a Lua script or in a MULTI), - * and about the Redis instance in general, i.e replication and persistence. + * and about the instance in general, i.e replication and persistence. * * It is possible to call this function even with a NULL context, however * in this case the following flags will not be reported: @@ -3854,15 +3854,15 @@ int VM_GetSelectedDb(ValkeyModuleCtx *ctx) { * * VALKEYMODULE_CTX_FLAGS_REPLICATED: The command was sent over the replication * link by the MASTER * - * * VALKEYMODULE_CTX_FLAGS_PRIMARY: The Redis instance is a primary + * * VALKEYMODULE_CTX_FLAGS_PRIMARY: The instance is a primary * - * * VALKEYMODULE_CTX_FLAGS_REPLICA: The Redis instance is a replica + * * VALKEYMODULE_CTX_FLAGS_REPLICA: The instance is a replica * - * * VALKEYMODULE_CTX_FLAGS_READONLY: The Redis instance is read-only + * * VALKEYMODULE_CTX_FLAGS_READONLY: The instance is read-only * - * * VALKEYMODULE_CTX_FLAGS_CLUSTER: The Redis instance is in cluster mode + * * VALKEYMODULE_CTX_FLAGS_CLUSTER: The instance is in cluster mode * - * * VALKEYMODULE_CTX_FLAGS_AOF: The Redis instance has AOF enabled + * * VALKEYMODULE_CTX_FLAGS_AOF: The instance has AOF enabled * * * VALKEYMODULE_CTX_FLAGS_RDB: The instance has RDB enabled * @@ -3871,7 +3871,7 @@ int VM_GetSelectedDb(ValkeyModuleCtx *ctx) { * * VALKEYMODULE_CTX_FLAGS_EVICT: Maxmemory is set and has an eviction * policy that may delete keys * - * * VALKEYMODULE_CTX_FLAGS_OOM: Redis is out of memory according to the + * * VALKEYMODULE_CTX_FLAGS_OOM: The server is out of memory according to the * maxmemory setting. * * * VALKEYMODULE_CTX_FLAGS_OOM_WARNING: Less than 25% of memory remains before @@ -3897,13 +3897,13 @@ int VM_GetSelectedDb(ValkeyModuleCtx *ctx) { * * VALKEYMODULE_CTX_FLAGS_MULTI_DIRTY: The next EXEC will fail due to dirty * CAS (touched keys). * - * * VALKEYMODULE_CTX_FLAGS_IS_CHILD: Redis is currently running inside + * * VALKEYMODULE_CTX_FLAGS_IS_CHILD: The server is currently running inside * background child process. * * * VALKEYMODULE_CTX_FLAGS_RESP3: Indicate the that client attached to this * context is using RESP3. 
* - * * VALKEYMODULE_CTX_FLAGS_SERVER_STARTUP: The Redis instance is starting + * * VALKEYMODULE_CTX_FLAGS_SERVER_STARTUP: The instance is starting */ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { int flags = 0; @@ -3989,7 +3989,7 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { if (hasActiveChildProcess()) flags |= VALKEYMODULE_CTX_FLAGS_ACTIVE_CHILD; if (server.in_fork_child) flags |= VALKEYMODULE_CTX_FLAGS_IS_CHILD; - /* Non-empty server.loadmodule_queue means that Redis is starting. */ + /* Non-empty server.loadmodule_queue means that the server is starting. */ if (listLength(server.loadmodule_queue) > 0) flags |= VALKEYMODULE_CTX_FLAGS_SERVER_STARTUP; @@ -3997,7 +3997,7 @@ int VM_GetContextFlags(ValkeyModuleCtx *ctx) { } /* Returns true if a client sent the CLIENT PAUSE command to the server or - * if Redis Cluster does a manual failover, pausing the clients. + * if the Cluster does a manual failover, pausing the clients. * This is needed when we have a master with replicas, and want to write, * without adding further data to the replication channel, that the replicas * replication offset, match the one of the master. When this happens, it is @@ -4024,7 +4024,7 @@ int VM_AvoidReplicaTraffic(void) { * is out of range. * * Note that the client will retain the currently selected DB even after - * the Redis command implemented by the module calling this function + * the command implemented by the module calling this function * returns. * * If the module command wishes to change something in a different DB and @@ -4066,7 +4066,7 @@ static void moduleInitKeyTypeSpecific(ValkeyModuleKey *key) { } } -/* Return a handle representing a Redis key, so that it is possible +/* Return a handle representing a key, so that it is possible * to call other APIs with the key handle as argument to perform * operations on the key. * @@ -4116,7 +4116,7 @@ ValkeyModuleKey *VM_OpenKey(ValkeyModuleCtx *ctx, robj *keyname, int mode) { /** * Returns the full OpenKey modes mask, using the return value * the module can check if a certain set of OpenKey modes are supported - * by the redis server version in use. + * by the server version in use. * Example: * * int supportedMode = VM_GetOpenKeyModesAll(); @@ -4524,7 +4524,7 @@ int moduleListIteratorSeek(ValkeyModuleKey *key, long index, int mode) { * - ENOTSUP if the key is of another type than list. * - EBADF if the key is not opened for writing. * - * Note: Before Redis 7.0, `errno` was not set by this function. */ + * Note: Before Redis OSS 7.0, `errno` was not set by this function. */ int VM_ListPush(ValkeyModuleKey *key, int where, ValkeyModuleString *ele) { if (!key || !ele) { errno = EINVAL; @@ -4558,7 +4558,7 @@ int VM_ListPush(ValkeyModuleKey *key, int where, ValkeyModuleString *ele) { * - ENOTSUP if the key is empty or of another type than list. * - EBADF if the key is not opened for writing. * - * Note: Before Redis 7.0, `errno` was not set by this function. */ + * Note: Before Redis OSS 7.0, `errno` was not set by this function. */ ValkeyModuleString *VM_ListPop(ValkeyModuleKey *key, int where) { if (!key) { errno = EINVAL; @@ -4976,7 +4976,7 @@ int VM_ZsetLastInScoreRange(ValkeyModuleKey *key, double min, double max, int mi * VALKEYMODULE_ERR. * * Note that this function takes 'min' and 'max' in the same form of the - * Redis ZRANGEBYLEX command. */ + * ZRANGEBYLEX command. 
*/ int zsetInitLexRange(ValkeyModuleKey *key, ValkeyModuleString *min, ValkeyModuleString *max, int first) { if (!key->value || key->value->type != OBJ_ZSET) return VALKEYMODULE_ERR; @@ -5225,7 +5225,7 @@ int VM_ZsetRangePrev(ValkeyModuleKey *key) { * strings instead of ValkeyModuleString objects. * VALKEYMODULE_HASH_COUNT_ALL: Include the number of inserted fields in the * returned number, in addition to the number of - * updated and deleted fields. (Added in Redis + * updated and deleted fields. (Added in Redis OSS * 6.2.) * * Unless NX is specified, the command overwrites the old field value with @@ -5245,7 +5245,7 @@ int VM_ZsetRangePrev(ValkeyModuleKey *key) { * flag VALKEYMODULE_HASH_COUNT_ALL is set, inserted fields not previously * existing in the hash are also counted. * - * If the return value is zero, `errno` is set (since Redis 6.2) as follows: + * If the return value is zero, `errno` is set (since Redis OSS 6.2) as follows: * * - EINVAL if any unknown flags are set or if key is NULL. * - ENOTSUP if the key is associated with a non Hash value. @@ -5256,8 +5256,8 @@ int VM_ZsetRangePrev(ValkeyModuleKey *key) { * back due to the NX and XX flags. * * NOTICE: The return value semantics of this function are very different - * between Redis 6.2 and older versions. Modules that use it should determine - * the Redis version and handle it accordingly. + * between Redis OSS 6.2 and older versions. Modules that use it should determine + * the server version and handle it accordingly. */ int VM_HashSet(ValkeyModuleKey *key, int flags, ...) { va_list ap; @@ -5891,9 +5891,9 @@ long long VM_StreamTrimByID(ValkeyModuleKey *key, int flags, ValkeyModuleStreamI } /* -------------------------------------------------------------------------- - * ## Calling Redis commands from modules + * ## Calling commands from modules * - * VM_Call() sends a command to Redis. The remaining functions handle the reply. + * VM_Call() sends a command to the server. The remaining functions handle the reply. * -------------------------------------------------------------------------- */ @@ -6043,7 +6043,7 @@ void VM_CallReplyPromiseSetUnblockHandler(ValkeyModuleCallReply *reply, ValkeyMo * If the execution was aborted successfully, it is promised that the unblock handler will not be called. * That said, it is possible that the abort operation will successes but the operation will still continue. * This can happened if, for example, a module implements some blocking command and does not respect the - * disconnect callback. For pure Redis commands this can not happened.*/ + * disconnect callback. For server-provided commands this can not happened.*/ int VM_CallReplyPromiseAbort(ValkeyModuleCallReply *reply, void **private_data) { ValkeyModuleAsyncRMCallPromise *promise = callReplyGetPrivateData(reply); if (!promise->c) return VALKEYMODULE_ERR; /* Promise can not be aborted, either already aborted or already finished. */ @@ -6197,9 +6197,9 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int return NULL; } -/* Exported API to call any Redis command from modules. +/* Exported API to call any command from modules. * - * * **cmdname**: The Redis command to call. + * * **cmdname**: The command to call. * * **fmt**: A format specifier string for the command's arguments. Each * of the arguments should be specified by a valid type specification. 
The * format specifier can also contain the modifiers `!`, `A`, `3` and `R` which @@ -6211,7 +6211,7 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int * * `l` -- The argument is a `long long` integer. * * `s` -- The argument is a ValkeyModuleString. * * `v` -- The argument(s) is a vector of ValkeyModuleString. - * * `!` -- Sends the Redis command and its arguments to replicas and AOF. + * * `!` -- Sends the command and its arguments to replicas and AOF. * * `A` -- Suppress AOF propagation, send only to replicas (requires `!`). * * `R` -- Suppress replicas propagation, send only to AOF (requires `!`). * * `3` -- Return a RESP3 reply. This will change the command reply. @@ -6229,7 +6229,7 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int * the command to run as the determined user, so that any future user * dependent activity, such as ACL checks within scripts will proceed as * expected. - * Otherwise, the command will run as the Redis unrestricted user. + * Otherwise, the command will run as the unrestricted user. * * `S` -- Run the command in a script mode, this means that it will raise * an error if a command which are not allowed inside a script * (flagged with the `deny-script` flag) is invoked (like SHUTDOWN). @@ -6252,7 +6252,7 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int * The module can use this reply object to set a handler which will be called when * the command gets unblocked using ValkeyModule_CallReplyPromiseSetUnblockHandler. * The handler must be set immediately after the command invocation (without releasing - * the Redis lock in between). If the handler is not set, the blocking command will + * the lock in between). If the handler is not set, the blocking command will * still continue its execution but the reply will be ignored (fire and forget), * notice that this is dangerous in case of role change, as explained below. * The module can use ValkeyModule_CallReplyPromiseAbort to abort the command invocation @@ -6260,21 +6260,21 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int * details). It is also the module's responsibility to abort the execution on role change, either by using * server event (to get notified when the instance becomes a replica) or relying on the disconnect * callback of the original client. Failing to do so can result in a write operation on a replica. - * Unlike other call replies, promise call reply **must** be freed while the Redis GIL is locked. + * Unlike other call replies, promise call reply **must** be freed while the GIL is locked. * Notice that on unblocking, the only promise is that the unblock handler will be called, * If the blocking VM_Call caused the module to also block some real client (using VM_BlockClient), * it is the module responsibility to unblock this client on the unblock handler. * On the unblock handler it is only allowed to perform the following: - * * Calling additional Redis commands using VM_Call + * * Calling additional commands using VM_Call * * Open keys using VM_OpenKey * * Replicate data to the replica or AOF * - * Specifically, it is not allowed to call any Redis module API which are client related such as: + * Specifically, it is not allowed to call any module API which are client related such as: * * VM_Reply* API's * * VM_BlockClient * * VM_GetCurrentUserName * - * * **...**: The actual arguments to the Redis command. + * * **...**: The actual arguments to the command. 
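As a usage sketch for the format specifiers listed above, a wrapper command that forwards a GET and relays whatever it returned. The call itself is as documented here; the ValkeyModule_WrongArity, ValkeyModule_ReplyWithError, ValkeyModule_ReplyWithCallReply and ValkeyModule_FreeCallReply names are assumed to mirror the VM_ internals elsewhere in this file.

    #include "valkeymodule.h"

    /* MYWRAP.GET key -- run GET on behalf of the caller and relay its reply.
     * The "s" specifier passes argv[1] through as a ValkeyModuleString. */
    static int MyWrapGet_Command(ValkeyModuleCtx *ctx, ValkeyModuleString **argv, int argc) {
        if (argc != 2) return ValkeyModule_WrongArity(ctx);
        ValkeyModuleCallReply *reply = ValkeyModule_Call(ctx, "GET", "s", argv[1]);
        if (reply == NULL)
            return ValkeyModule_ReplyWithError(ctx, "ERR GET could not be invoked");
        ValkeyModule_ReplyWithCallReply(ctx, reply);  /* relay the reply verbatim */
        ValkeyModule_FreeCallReply(reply);
        return VALKEYMODULE_OK;
    }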
* * On success a ValkeyModuleCallReply object is returned, otherwise * NULL is returned and errno is set to the following values: @@ -6511,7 +6511,7 @@ ValkeyModuleCallReply *VM_Call(ValkeyModuleCtx *ctx, const char *cmdname, const } } - /* If this is a Redis Cluster node, we need to make sure the module is not + /* If this is a Cluster node, we need to make sure the module is not * trying to access non-local keys, with the exception of commands * received from our master. */ if (server.cluster_enabled && !mustObeyClient(ctx->client)) { @@ -6616,8 +6616,8 @@ const char *VM_CallReplyProto(ValkeyModuleCallReply *reply, size_t *len) { * ## Modules data types * * When String DMA or using existing data structures is not enough, it is - * possible to create new data types from scratch and export them to - * Redis. The module must provide a set of callbacks for handling the + * possible to create new data types from scratch. + * The module must provide a set of callbacks for handling the * new values exported (for example in order to provide RDB saving/loading, * AOF rewrite, and so forth). In this section we define this API. * -------------------------------------------------------------------------- */ @@ -6810,7 +6810,7 @@ robj *moduleTypeDupOrReply(client *c, robj *fromkey, robj *tokey, int todb, robj * following. Please for in depth documentation check the modules API * documentation, especially https://redis.io/topics/modules-native-types. * - * * **name**: A 9 characters data type name that MUST be unique in the Redis + * * **name**: A 9 characters data type name that MUST be unique in the * Modules ecosystem. Be creative... and there will be no collisions. Use * the charset A-Z a-z 9-0, plus the two "-_" characters. A good * idea is to use, for example `-`. For example @@ -6870,7 +6870,7 @@ robj *moduleTypeDupOrReply(client *c, robj *fromkey, robj *tokey, int todb, robj * freeing the value. for example: how many pointers are gonna be freed. Note that if it * returns 0, we'll always do an async free. * * **unlink**: A callback function pointer that used to notifies the module that the key has - * been removed from the DB by redis, and may soon be freed by a background thread. Note that + * been removed from the DB by the server, and may soon be freed by a background thread. Note that * it won't be called on FLUSHALL/FLUSHDB (both sync and async), and the module can use the * ValkeyModuleEvent_FlushDB to hook into that. * * **copy**: A callback function pointer that is used to make a copy of the specified key. @@ -6914,7 +6914,7 @@ robj *moduleTypeDupOrReply(client *c, robj *fromkey, robj *tokey, int todb, robj * If ValkeyModule_CreateDataType() is called outside of ValkeyModule_OnLoad() function, * there is already a module registering a type with the same name, * or if the module name or encver is invalid, NULL is returned. - * Otherwise the new type is registered into Redis, and a reference of + * Otherwise the new type is registered into the server, and a reference of * type ValkeyModuleType is returned: the caller of the function should store * this reference into a global variable to make future use of it in the * modules type API, since a single module may register multiple types. @@ -7390,11 +7390,11 @@ ssize_t rdbSaveModulesAux(rio *rdb, int when) { * one element after the other, for all the elements that constitute a given * data structure. 
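For orientation, a hedged sketch of a module type's digest callback built from these calls; the unordered and ordered patterns spelled out just below map onto it directly. ValkeyModule_DigestAddStringBuffer and ValkeyModule_DigestAddLongLong are assumed to exist alongside the ValkeyModule_DigestEndSequence mentioned in this hunk, and the mytype layout is purely hypothetical.

    #include "valkeymodule.h"

    /* Hypothetical value layout: an unordered bag of strings plus one counter. */
    struct myelem { const char *ptr; size_t len; };
    struct mytype { struct myelem *elems; size_t numelems; long long counter; };

    static void MyType_Digest(ValkeyModuleDigest *md, void *value) {
        struct mytype *o = value;
        /* Unordered elements: one sequence per element, so ordering cannot matter. */
        for (size_t j = 0; j < o->numelems; j++) {
            ValkeyModule_DigestAddStringBuffer(md, o->elems[j].ptr, o->elems[j].len);
            ValkeyModule_DigestEndSequence(md);
        }
        /* The counter always comes last, so it forms its own ordered part. */
        ValkeyModule_DigestAddLongLong(md, o->counter);
        ValkeyModule_DigestEndSequence(md);
    }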
The function call must be followed by the call to * `ValkeyModule_DigestEndSequence` eventually, when all the elements that are - * always in a given order are added. See the Redis Modules data types - * documentation for more info. However this is a quick example that uses Redis - * data types as an example. + * always in a given order are added. See the Modules data types + * documentation for more info. However this is a quick example that uses the + * Set, Hash and List data types as an example. * - * To add a sequence of unordered elements (for example in the case of a Redis + * To add a sequence of unordered elements (for example in the case of a * Set), the pattern to use is: * * foreach element { @@ -7414,7 +7414,7 @@ ssize_t rdbSaveModulesAux(rio *rdb, int when) { * } * * Because the key and value will be always in the above order, while instead - * the single key-value pairs, can appear in any position into a Redis hash. + * the single key-value pairs, can appear in any position into a hash. * * A list of ordered elements would be implemented with: * @@ -7447,13 +7447,13 @@ void VM_DigestEndSequence(ValkeyModuleDigest *md) { * * This call basically reuses the 'rdb_load' callback which module data types * implement in order to allow a module to arbitrarily serialize/de-serialize - * keys, similar to how the Redis 'DUMP' and 'RESTORE' commands are implemented. + * keys, similar to how the 'DUMP' and 'RESTORE' commands are implemented. * * Modules should generally use the VALKEYMODULE_OPTIONS_HANDLE_IO_ERRORS flag and * make sure the de-serialization code properly checks and handles IO errors * (freeing allocated buffers and returning a NULL). * - * If this is NOT done, Redis will handle corrupted (or just truncated) serialized + * If this is NOT done, the server will handle corrupted (or just truncated) serialized * data by producing an error message and terminating the process. */ void *VM_LoadDataTypeFromStringEncver(const ValkeyModuleString *str, const moduleType *mt, int encver) { @@ -7487,7 +7487,7 @@ void *VM_LoadDataTypeFromString(const ValkeyModuleString *str, const moduleType * * This call basically reuses the 'rdb_save' callback which module data types * implement in order to allow a module to arbitrarily serialize/de-serialize - * keys, similar to how the Redis 'DUMP' and 'RESTORE' commands are implemented. + * keys, similar to how the 'DUMP' and 'RESTORE' commands are implemented. */ ValkeyModuleString *VM_SaveDataTypeToString(ValkeyModuleCtx *ctx, void *data, const moduleType *mt) { rio payload; @@ -7526,7 +7526,7 @@ int VM_GetDbIdFromDigest(ValkeyModuleDigest *dig) { * is only called in the context of the aof_rewrite method of data types exported * by a module. The command works exactly like ValkeyModule_Call() in the way * the parameters are passed, but it does not return anything as the error - * handling is performed by Redis itself. */ + * handling is performed by the server itself. */ void VM_EmitAOF(ValkeyModuleIO *io, const char *cmdname, const char *fmt, ...) { if (io->error) return; struct serverCommand *cmd; @@ -7545,7 +7545,7 @@ void VM_EmitAOF(ValkeyModuleIO *io, const char *cmdname, const char *fmt, ...) { return; } - /* Emit the arguments into the AOF in Redis protocol format. */ + /* Emit the arguments into the AOF in RESP format. 
*/ va_start(ap, fmt); argv = moduleCreateArgvFromUserFormat(cmdname,fmt,&argc,&flags,ap); va_end(ap); @@ -7636,7 +7636,7 @@ void moduleLogRaw(ValkeyModule *module, const char *levelstr, const char *fmt, v serverLogRaw(level,msg); } -/* Produces a log message to the standard Redis log, the format accepts +/* Produces a log message to the standard server log, the format accepts * printf-alike specifiers, while level is a string describing the log * level to use when emitting the log, and must be one of the following: * @@ -7673,13 +7673,13 @@ void VM_LogIOError(ValkeyModuleIO *io, const char *levelstr, const char *fmt, .. va_end(ap); } -/* Redis-like assert function. +/* Valkey assert function. * * The macro `ValkeyModule_Assert(expression)` is recommended, rather than * calling this function directly. * * A failed assertion will shut down the server and produce logging information - * that looks identical to information generated by Redis itself. + * that looks identical to information generated by the server itself. */ void VM__Assert(const char *estr, const char *file, int line) { _serverAssert(estr, file, line); @@ -7712,7 +7712,7 @@ int isModuleClientUnblocked(client *c) { * because the client is terminated, but is also called for cleanup when a * client is unblocked in a clean way after replaying. * - * What we do here is just to set the client to NULL in the redis module + * What we do here is just to set the client to NULL in the module * blocked client handle. This way if the client is terminated while there * is a pending threaded operation involving the blocked client, we'll know * that the client no longer exists and no reply callback should be called. @@ -8124,10 +8124,10 @@ void VM_BlockClientSetPrivateData(ValkeyModuleBlockedClient *blocked_client, voi } /* This call is similar to ValkeyModule_BlockClient(), however in this case we - * don't just block the client, but also ask Redis to unblock it automatically + * don't just block the client, but also ask the server to unblock it automatically * once certain keys become "ready", that is, contain more data. * - * Basically this is similar to what a typical Redis command usually does, + * Basically this is similar to what a typical command usually does, * like BLPOP or BZPOPMAX: the client blocks if it cannot be served ASAP, * and later when the key receives new data (a list push for instance), the * client is unblocked and served. @@ -8166,7 +8166,7 @@ void VM_BlockClientSetPrivateData(ValkeyModuleBlockedClient *blocked_client, voi * be accessible later in the reply callback. Normally when blocking with * ValkeyModule_BlockClient() the private data to reply to the client is * passed when calling ValkeyModule_UnblockClient() but here the unblocking - * is performed by Redis itself, so we need to have some private data before + * is performed by the server itself, so we need to have some private data before * hand. The private data is used to store any information about the specific * unblocking operation that you are implementing. Such information will be * freed using the free_privdata callback provided by the user. @@ -8225,7 +8225,7 @@ int moduleUnblockClientByHandle(ValkeyModuleBlockedClient *bc, void *privdata) { return VALKEYMODULE_OK; } -/* This API is used by the Redis core to unblock a client that was blocked +/* This API is used by the server core to unblock a client that was blocked * by a module. 
*/ void moduleUnblockClient(client *c) { ValkeyModuleBlockedClient *bc = c->bstate.module_blocked_handle; @@ -8304,7 +8304,7 @@ void VM_SetDisconnectCallback(ValkeyModuleBlockedClient *bc, ValkeyModuleDisconn * * Clients end into this list because of calls to VM_UnblockClient(), * however it is possible that while the module was doing work for the - * blocked client, it was terminated by Redis (for timeout or other reasons). + * blocked client, it was terminated by the server (for timeout or other reasons). * When this happens the ValkeyModuleBlockedClient structure in the queue * will have the 'client' field set to NULL. */ void moduleHandleBlockedClients(void) { @@ -8506,8 +8506,8 @@ int VM_BlockedClientDisconnected(ValkeyModuleCtx *ctx) { * ## Thread Safe Contexts * -------------------------------------------------------------------------- */ -/* Return a context which can be used inside threads to make Redis context - * calls with certain modules APIs. If 'bc' is not NULL then the module will +/* Return a context which can be used inside threads to make calls requiring a + * context with certain modules APIs. If 'bc' is not NULL then the module will * be bound to a blocked client, and it will be possible to use the * `ValkeyModule_Reply*` family of functions to accumulate a reply for when the * client will be unblocked. Otherwise the thread safe context will be @@ -8698,9 +8698,9 @@ void moduleReleaseGIL(void) { * * `type` is the event type bit, that must match the mask given at registration * time. The event string is the actual command being executed, and key is the - * relevant Redis key. + * relevant key. * - * Notification callback gets executed with a redis context that can not be + * Notification callback gets executed with a context that can not be * used to send anything to the client, and has the db number where the event * occurred as its selected db number. * @@ -8708,11 +8708,11 @@ void moduleReleaseGIL(void) { * module notifications to work. * * Warning: the notification callbacks are performed in a synchronous manner, - * so notification callbacks must to be fast, or they would slow Redis down. + * so notification callbacks must to be fast, or they would slow the server down. * If you need to take long actions, use threads to offload them. * * Moreover, the fact that the notification is executed synchronously means - * that the notification code will be executed in the middle on Redis logic + * that the notification code will be executed in the middle of server logic * (commands logic, eviction, expire). Changing the key space while the logic * runs is dangerous and discouraged. In order to react to key space events with * write actions, please refer to `VM_AddPostNotificationJob`. @@ -8757,7 +8757,7 @@ void firePostExecutionUnitJobs(void) { /* When running inside a key space notification callback, it is dangerous and highly discouraged to perform any write * operation (See `VM_SubscribeToKeyspaceEvents`). In order to still perform write actions in this scenario, - * Redis provides `VM_AddPostNotificationJob` API. The API allows to register a job callback which Redis will call + * the server provides `VM_AddPostNotificationJob` API. The API allows to register a job callback which the server will call * when the following condition are promised to be fulfilled: * 1. It is safe to perform any write operation. * 2. The job will be called atomically along side the key space notification. 
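To ground the callback contract described in this hunk (type bit, event string, key), here is a minimal subscription sketch. ValkeyModule_SubscribeToKeyspaceEvents, the VALKEYMODULE_NOTIFY_STRING mask, ValkeyModule_StringPtrLen and ValkeyModule_Log are assumed to mirror their RedisModule counterparts; the callback only logs, since as noted above any write action belongs in the post-notification job API.

    #include "valkeymodule.h"

    /* Runs synchronously inside the server, so it must stay fast and must not
     * touch the keyspace; writes go through VM_AddPostNotificationJob instead. */
    static int OnStringEvent(ValkeyModuleCtx *ctx, int type, const char *event,
                             ValkeyModuleString *key) {
        (void)type;
        size_t len;
        const char *keyname = ValkeyModule_StringPtrLen(key, &len);
        ValkeyModule_Log(ctx, "notice", "event '%s' fired on key '%.*s'",
                         event, (int)len, keyname);
        return VALKEYMODULE_OK;
    }

    /* In ValkeyModule_OnLoad():
     *   ValkeyModule_SubscribeToKeyspaceEvents(ctx, VALKEYMODULE_NOTIFY_STRING, OnStringEvent);
     */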
@@ -8766,7 +8766,7 @@ void firePostExecutionUnitJobs(void) { * This raises a concerns of entering an infinite loops, we consider infinite loops * as a logical bug that need to be fixed in the module, an attempt to protect against * infinite loops by halting the execution could result in violation of the feature correctness - * and so Redis will make no attempt to protect the module from infinite loops. + * and so the server will make no attempt to protect the module from infinite loops. * * 'free_pd' can be NULL and in such case will not be used. * @@ -8978,8 +8978,8 @@ int VM_SendClusterMessage(ValkeyModuleCtx *ctx, const char *target_id, uint8_t t /* Return an array of string pointers, each string pointer points to a cluster * node ID of exactly VALKEYMODULE_NODE_ID_LEN bytes (without any null term). * The number of returned node IDs is stored into `*numnodes`. - * However if this function is called by a module not running an a Redis - * instance with Redis Cluster enabled, NULL is returned instead. + * However if this function is called by a module not running an an + * instance with Cluster enabled, NULL is returned instead. * * The IDs returned can be used with ValkeyModule_GetClusterNodeInfo() in order * to get more information about single node. @@ -9083,10 +9083,10 @@ int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char * return VALKEYMODULE_OK; } -/* Set Redis Cluster flags in order to change the normal behavior of - * Redis Cluster, especially with the goal of disabling certain functions. +/* Set Cluster flags in order to change the normal behavior of + * Cluster, especially with the goal of disabling certain functions. * This is useful for modules that use the Cluster API in order to create - * a different distributed system, but still want to use the Redis Cluster + * a different distributed system, but still want to use the Cluster * message bus. Flags that can be set: * * * CLUSTER_MODULE_FLAG_NO_FAILOVER @@ -9094,11 +9094,11 @@ int VM_GetClusterNodeInfo(ValkeyModuleCtx *ctx, const char *id, char *ip, char * * * With the following effects: * - * * NO_FAILOVER: prevent Redis Cluster slaves from failing over a dead master. + * * NO_FAILOVER: prevent Cluster slaves from failing over a dead master. * Also disables the replica migration feature. * * * NO_REDIRECTION: Every node will accept any key, without trying to perform - * partitioning according to the Redis Cluster algorithm. + * partitioning according to the Cluster algorithm. * Slots information will still be propagated across the * cluster, but without effect. */ void VM_SetClusterFlags(ValkeyModuleCtx *ctx, uint64_t flags) { @@ -9131,7 +9131,7 @@ const char *VM_ClusterCanonicalKeyNameInSlot(unsigned int slot) { * module timers subsystem in order to process the next event. * * All the timers are stored into a radix tree, ordered by expire time, when - * the main Redis event loop timer callback is called, we try to process all + * the main server event loop timer callback is called, we try to process all * the timers already expired one after the other. Then we re-enter the event * loop registering a timer that will expire when the next to process module * timer will expire. @@ -9377,7 +9377,7 @@ static void eventLoopCbWritable(struct aeEventLoop *ae, int fd, void *user_data, * On success VALKEYMODULE_OK is returned, otherwise * VALKEYMODULE_ERR is returned and errno is set to the following values: * - * * ERANGE: `fd` is negative or higher than `maxclients` Redis config. 
+ * * ERANGE: `fd` is negative or higher than `maxclients` server config. * * EINVAL: `callback` is NULL or `mask` value is invalid. * * `errno` might take other values in case of an internal error. @@ -9452,7 +9452,7 @@ int VM_EventLoopAdd(int fd, int mask, ValkeyModuleEventLoopFunc func, void *user * On success VALKEYMODULE_OK is returned, otherwise * VALKEYMODULE_ERR is returned and errno is set to the following values: * - * * ERANGE: `fd` is negative or higher than `maxclients` Redis config. + * * ERANGE: `fd` is negative or higher than `maxclients` server config. * * EINVAL: `mask` value is invalid. */ int VM_EventLoopDel(int fd, int mask) { @@ -9478,7 +9478,7 @@ int VM_EventLoopDel(int fd, int mask) { return VALKEYMODULE_OK; } -/* This function can be called from other threads to trigger callback on Redis +/* This function can be called from other threads to trigger callback on the server * main thread. On success VALKEYMODULE_OK is returned. If `func` is NULL * VALKEYMODULE_ERR is returned and errno is set to EINVAL. */ @@ -9529,7 +9529,7 @@ static void eventLoopHandleOneShotEvents(void) { /* -------------------------------------------------------------------------- * ## Modules ACL API * - * Implements a hook into the authentication and authorization within Redis. + * Implements a hook into the authentication and authorization within the server. * --------------------------------------------------------------------------*/ /* This function is called when a client's user has changed and invokes the @@ -9588,7 +9588,7 @@ static void moduleFreeAuthenticatedClients(ValkeyModule *module) { } } -/* Creates a Redis ACL user that the module can use to authenticate a client. +/* Creates an ACL user that the module can use to authenticate a client. * After obtaining the user, the module should set what such user can do * using the VM_SetUserACL() function. Once configured, the user * can be used in order to authenticate a connection, with the specified @@ -9600,7 +9600,7 @@ static void moduleFreeAuthenticatedClients(ValkeyModule *module) { * * Users created here are not checked for duplicated name, so it's up to * the module calling this function to take care of not creating users * with the same name. - * * The created user can be used to authenticate multiple Redis connections. + * * The created user can be used to authenticate multiple connections. * * The caller can later free the user using the function * VM_FreeModuleUser(). When this function is called, if there are @@ -9628,7 +9628,7 @@ int VM_FreeModuleUser(ValkeyModuleUser *user) { return VALKEYMODULE_OK; } -/* Sets the permissions of a user created through the redis module +/* Sets the permissions of a user created through the module * interface. The syntax is the same as ACL SETUSER, so refer to the * documentation in acl.c for more information. See VM_CreateModuleUser * for detailed usage. @@ -9640,7 +9640,7 @@ int VM_SetModuleUserACL(ValkeyModuleUser *user, const char* acl) { } /* Sets the permission of a user with a complete ACL string, such as one - * would use on the redis ACL SETUSER command line API. This differs from + * would use on the ACL SETUSER command line API. This differs from * VM_SetModuleUserACL, which only takes single ACL operations at a time. * * Returns VALKEYMODULE_OK on success and VALKEYMODULE_ERR on failure @@ -9895,7 +9895,7 @@ static int authenticateClientWithUser(ValkeyModuleCtx *ctx, user *user, ValkeyMo } -/* Authenticate the current context's user with the provided redis acl user. 
+/* Authenticate the current context's user with the provided acl user. * Returns VALKEYMODULE_ERR if the user is disabled. * * See authenticateClientWithUser for information about callback, client_id, @@ -9904,7 +9904,7 @@ int VM_AuthenticateClientWithUser(ValkeyModuleCtx *ctx, ValkeyModuleUser *module return authenticateClientWithUser(ctx, module_user->user, callback, privdata, client_id); } -/* Authenticate the current context's user with the provided redis acl user. +/* Authenticate the current context's user with the provided acl user. * Returns VALKEYMODULE_ERR if the user is disabled or the user does not exist. * * See authenticateClientWithUser for information about callback, client_id, @@ -10717,11 +10717,11 @@ int moduleUnregisterFilters(ValkeyModule *module) { /* Register a new command filter function. * - * Command filtering makes it possible for modules to extend Redis by plugging + * Command filtering makes it possible for modules to extend the server by plugging * into the execution flow of all commands. * - * A registered filter gets called before Redis executes *any* command. This - * includes both core Redis commands and commands registered by any module. The + * A registered filter gets called before the server executes *any* command. This + * includes both core server commands and commands registered by any module. The * filter applies in all execution paths including: * * 1. Invocation by a client. @@ -10732,21 +10732,21 @@ int moduleUnregisterFilters(ValkeyModule *module) { * The filter executes in a special filter context, which is different and more * limited than a ValkeyModuleCtx. Because the filter affects any command, it * must be implemented in a very efficient way to reduce the performance impact - * on Redis. All Redis Module API calls that require a valid context (such as + * on the server. All Module API calls that require a valid context (such as * `ValkeyModule_Call()`, `ValkeyModule_OpenKey()`, etc.) are not supported in a * filter context. * * The `ValkeyModuleCommandFilterCtx` can be used to inspect or modify the - * executed command and its arguments. As the filter executes before Redis + * executed command and its arguments. As the filter executes before the server * begins processing the command, any change will affect the way the command is - * processed. For example, a module can override Redis commands this way: + * processed. For example, a module can override server commands this way: * * 1. Register a `MODULE.SET` command which implements an extended version of - * the Redis `SET` command. + * the `SET` command. * 2. Register a command filter which detects invocation of `SET` on a specific * pattern of keys. Once detected, the filter will replace the first * argument from `SET` to `MODULE.SET`. - * 3. When filter execution is complete, Redis considers the new command name + * 3. When filter execution is complete, the server considers the new command name * and therefore executes the module's own command. * * Note that in the above use case, if `MODULE.SET` itself uses @@ -10847,7 +10847,7 @@ ValkeyModuleString *VM_CommandFilterArgGet(ValkeyModuleCommandFilterCtx *fctx, i } /* Modify the filtered command by inserting a new argument at the specified - * position. The specified ValkeyModuleString argument may be used by Redis + * position. The specified ValkeyModuleString argument may be used by the server * after the filter context is destroyed, so it must not be auto-memory * allocated, freed or used elsewhere. 
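The SET-to-MODULE.SET override walked through above can be sketched as follows. It is illustrative only: MODULE.SET itself is assumed to be registered elsewhere, the VALKEYMODULE_CMDFILTER_NOSELF flag name is assumed from the compat header, and the replacement argument is created with a NULL context so it is not auto-memory, as the ArgReplace documentation requires.

#include <strings.h>          /* strcasecmp */
#include "redismodule.h"

/* Runs before *every* command: must be fast, and only the
 * CommandFilter* calls are legal in this limited context. */
static void SetOverrideFilter(RedisModuleCommandFilterCtx *fctx) {
    if (RedisModule_CommandFilterArgsCount(fctx) < 3) return;
    const RedisModuleString *cmd = RedisModule_CommandFilterArgGet(fctx, 0);
    size_t len; const char *p = RedisModule_StringPtrLen(cmd, &len);
    if (len == 3 && !strcasecmp(p, "SET")) {
        /* NULL ctx: the string must not be auto-memory, since the server
         * keeps using it after the filter context is destroyed. */
        RedisModuleString *newname = RedisModule_CreateString(NULL, "MODULE.SET", 10);
        RedisModule_CommandFilterArgReplace(fctx, 0, newname);
    }
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "setfilter", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR)
        return VALKEYMODULE_ERR;
    /* MODULE.SET itself (not shown) would be registered here as well. */
    if (RedisModule_RegisterCommandFilter(ctx, SetOverrideFilter,
                                          VALKEYMODULE_CMDFILTER_NOSELF) == NULL)
        return VALKEYMODULE_ERR;
    return VALKEYMODULE_OK;
}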
*/ @@ -10871,7 +10871,7 @@ int VM_CommandFilterArgInsert(ValkeyModuleCommandFilterCtx *fctx, int pos, Valke } /* Modify the filtered command by replacing an existing argument with a new one. - * The specified ValkeyModuleString argument may be used by Redis after the + * The specified ValkeyModuleString argument may be used by the server after the * filter context is destroyed, so it must not be auto-memory allocated, freed * or used elsewhere. */ @@ -10946,7 +10946,7 @@ size_t VM_MallocSizeDict(ValkeyModuleDict* dict) { } /* Return the a number between 0 to 1 indicating the amount of memory - * currently used, relative to the Redis "maxmemory" configuration. + * currently used, relative to the server "maxmemory" configuration. * * * 0 - No memory limit configured. * * Between 0 and 1 - The percentage of the memory used normalized in 0-1 range. @@ -11018,7 +11018,7 @@ void VM_ScanCursorDestroy(ValkeyModuleScanCursor *cursor) { * void scan_callback(ValkeyModuleCtx *ctx, ValkeyModuleString *keyname, * ValkeyModuleKey *key, void *privdata); * - * - `ctx`: the redis module context provided to for the scan. + * - `ctx`: the module context provided to for the scan. * - `keyname`: owned by the caller and need to be retained if used after this * function. * - `key`: holds info on the key and value, it is provided as best effort, in @@ -11051,7 +11051,7 @@ void VM_ScanCursorDestroy(ValkeyModuleScanCursor *cursor) { * * It is also possible to restart an existing cursor using VM_ScanCursorRestart. * - * IMPORTANT: This API is very similar to the Redis SCAN command from the + * IMPORTANT: This API is very similar to the SCAN command from the * point of view of the guarantees it provides. This means that the API * may report duplicated keys, but guarantees to report at least one time * every key that was there from the start to the end of the scanning process. @@ -11060,7 +11060,7 @@ void VM_ScanCursorDestroy(ValkeyModuleScanCursor *cursor) { * that the internal state of the database may change. For instance it is safe * to delete or modify the current key, but may not be safe to delete any * other key. - * Moreover playing with the Redis keyspace while iterating may have the + * Moreover playing with the keyspace while iterating may have the * effect of returning more duplicates. A safe pattern is to store the keys * names you want to modify elsewhere, and perform the actions on the keys * later when the iteration is complete. However this can cost a lot of @@ -11116,7 +11116,7 @@ static void moduleScanKeyCallback(void *privdata, const dictEntry *de) { * * void scan_callback(ValkeyModuleKey *key, ValkeyModuleString* field, ValkeyModuleString* value, void *privdata); * - * - key - the redis key context provided to for the scan. + * - key - the key context provided to for the scan. * - field - field name, owned by the caller and need to be retained if used * after this function. * - value - value string or NULL for set type, owned by the caller and need to @@ -11238,7 +11238,7 @@ int VM_ScanKey(ValkeyModuleKey *key, ValkeyModuleScanCursor *cursor, ValkeyModul /* Create a background child process with the current frozen snapshot of the * main process where you can do some processing in the background without * affecting / freezing the traffic and no need for threads and GIL locking. - * Note that Redis allows for only one concurrent fork. + * Note that the server allows for only one concurrent fork. * When the child wants to exit, it should call ValkeyModule_ExitFromChild. 
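A short sketch of the VM_Scan cursor API documented above (the fork API notes continue below): a command that walks the keyspace with SCAN-like guarantees and only counts keys, so nothing is modified during iteration. Command and module names are invented; the callback shape is the one quoted in the comment above.

#include "redismodule.h"

static void countKeyCb(RedisModuleCtx *ctx, RedisModuleString *keyname,
                       RedisModuleKey *key, void *privdata) {
    /* keyname/key are owned by the caller; just bump the counter. */
    VALKEYMODULE_NOT_USED(ctx); VALKEYMODULE_NOT_USED(keyname); VALKEYMODULE_NOT_USED(key);
    (*(long long *)privdata)++;
}

/* SCANDEMO.COUNT: reply with how many keys the iteration reported. */
int ScanCount_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc);
    long long seen = 0;
    RedisModuleScanCursor *cur = RedisModule_ScanCursorCreate();
    while (RedisModule_Scan(ctx, cur, countKeyCb, &seen)) { /* keep scanning */ }
    RedisModule_ScanCursorDestroy(cur);
    return RedisModule_ReplyWithLongLong(ctx, seen);
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "scandemo", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR)
        return VALKEYMODULE_ERR;
    return RedisModule_CreateCommand(ctx, "scandemo.count",
                                     ScanCount_RedisCommand, "readonly", 0, 0, 0);
}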
* If the parent wants to kill the child it should call ValkeyModule_KillForkChild * The done handler callback will be executed on the parent process when the @@ -11371,7 +11371,7 @@ static uint64_t moduleEventVersions[] = { * uint64_t subevent, * void *data); * - * The 'ctx' is a normal Redis module context that the callback can use in + * The 'ctx' is a normal module context that the callback can use in * order to call other modules APIs. The 'eid' is the event itself, this * is only useful in the case the module subscribed to multiple events: using * the 'id' field of this structure it is possible to check if the event @@ -11494,15 +11494,15 @@ static uint64_t moduleEventVersions[] = { * * `VALKEYMODULE_SUBEVENT_REPLICA_CHANGE_OFFLINE` * * No additional information is available so far: future versions - * of Redis will have an API in order to enumerate the replicas + * of the server will have an API in order to enumerate the replicas * connected and their state. * * * ValkeyModuleEvent_CronLoop * - * This event is called every time Redis calls the serverCron() + * This event is called every time the server calls the serverCron() * function in order to do certain bookkeeping. Modules that are * required to do operations from time to time may use this callback. - * Normally Redis calls this function 10 times per second, but + * Normally the server calls this function 10 times per second, but * this changes depending on the "hz" configuration. * No sub events are available. * @@ -11511,7 +11511,7 @@ static uint64_t moduleEventVersions[] = { * * int32_t hz; // Approximate number of events per second. * - * * ValkeyModuleEvent_MasterLinkChange + * * ValkeyModuleEvent_PrimaryLinkChange * * This is called for replicas in order to notify when the * replication link becomes functional (up) with our master, @@ -11567,12 +11567,12 @@ static uint64_t moduleEventVersions[] = { * * * ValkeyModuleEvent_ReplBackup * - * WARNING: Replication Backup events are deprecated since Redis 7.0 and are never fired. + * WARNING: Replication Backup events are deprecated since Redis OSS 7.0 and are never fired. * See ValkeyModuleEvent_ReplAsyncLoad for understanding how Async Replication Loading events * are now triggered when repl-diskless-load is set to swapdb. * * Called when repl-diskless-load config is set to swapdb, - * And redis needs to backup the current database for the + * And the server needs to backup the current database for the * possibility to be restored later. A module with global data and * maybe with aux_load and aux_save callbacks may need to use this * notification to backup / restore / discard its globals. @@ -11586,7 +11586,7 @@ static uint64_t moduleEventVersions[] = { * * Called when repl-diskless-load config is set to swapdb and a replication with a master of same * data set history (matching replication ID) occurs. - * In which case redis serves current data set while loading new database in memory from socket. + * In which case the server serves current data set while loading new database in memory from socket. * Modules must have declared they support this mechanism in order to activate it, through * VALKEYMODULE_OPTIONS_HANDLE_REPL_ASYNC_LOAD flag. * The following sub events are available: @@ -11736,7 +11736,7 @@ typedef struct KeyInfo { int mode; } KeyInfo; -/* This is called by the Redis internals every time we want to fire an +/* This is called by the server internals every time we want to fire an * event that can be intercepted by some module. 
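The generic event callback prototype above maps directly onto code. A hedged sketch subscribing to the CronLoop event, whose payload (int32_t hz) is described in this file; the payload type name is assumed to keep its compat alias, and the module name and logging cadence are illustrative.

#include "redismodule.h"

static long long cron_ticks = 0;

static void onCron(RedisModuleCtx *ctx, RedisModuleEvent eid,
                   uint64_t subevent, void *data) {
    VALKEYMODULE_NOT_USED(eid); VALKEYMODULE_NOT_USED(subevent);
    RedisModuleCronLoop *cl = data;   /* payload documented above as { int32_t hz; } */
    cron_ticks++;
    if (cron_ticks % (cl->hz * 60) == 0)
        RedisModule_Log(ctx, "notice", "still alive after ~%lld minutes",
                        cron_ticks / (cl->hz * 60));
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "cronwatch", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR)
        return VALKEYMODULE_ERR;
    return RedisModule_SubscribeToServerEvent(ctx, RedisModuleEvent_CronLoop, onCron);
}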
The pointer 'data' is useful * in order to populate the event-specific structure when needed, in order * to return the structure with more information to the callback. @@ -11963,7 +11963,7 @@ int moduleRegisterApi(const char *funcname, void *funcptr) { moduleRegisterApi("ValkeyModule_" #name, (void *)(unsigned long)VM_ ## name);\ moduleRegisterApi("RedisModule_" #name, (void *)(unsigned long)VM_ ## name);\ -/* Global initialization at Redis startup. */ +/* Global initialization at server startup. */ void moduleRegisterCoreAPI(void); /* Currently, this function is just a placeholder for the module system @@ -12002,11 +12002,11 @@ void moduleInitModulesSystem(void) { moduleRegisterCoreAPI(); - /* Create a pipe for module threads to be able to wake up the redis main thread. + /* Create a pipe for module threads to be able to wake up the server main thread. * Make the pipe non blocking. This is just a best effort aware mechanism * and we do not want to block not in the read nor in the write half. * Enable close-on-exec flag on pipes in case of the fork-exec system calls in - * sentinels or redis servers. */ + * sentinels or servers. */ if (anetPipe(server.module_pipe, O_CLOEXEC|O_NONBLOCK, O_CLOEXEC|O_NONBLOCK) == -1) { serverLog(LL_WARNING, "Can't create the pipe for module threads: %s", strerror(errno)); @@ -12337,7 +12337,7 @@ int moduleLoad(const char *path, void **module_argv, int module_argc, int is_loa return C_ERR; } - /* Redis module loaded! Register it. */ + /* Module loaded! Register it. */ dictAdd(modules,ctx.module->name,ctx.module); ctx.module->blocked_clients = 0; ctx.module->handle = handle; @@ -12808,11 +12808,11 @@ unsigned int maskModuleEnumConfigFlags(unsigned int flags) { return new_flags; } -/* Create a string config that Redis users can interact with via the Redis config file, +/* Create a string config that users can interact with via the server config file, * `CONFIG SET`, `CONFIG GET`, and `CONFIG REWRITE` commands. * * The actual config value is owned by the module, and the `getfn`, `setfn` and optional - * `applyfn` callbacks that are provided to Redis in order to access or manipulate the + * `applyfn` callbacks that are provided to the server in order to access or manipulate the * value. The `getfn` callback retrieves the value from the module, while the `setfn` * callback provides a value to be stored into the module config. * The optional `applyfn` callback is called after a `CONFIG SET` command modified one or @@ -12823,10 +12823,10 @@ unsigned int maskModuleEnumConfigFlags(unsigned int flags) { * are identical, and the callback will only be run once. * Both the `setfn` and `applyfn` can return an error if the provided value is invalid or * cannot be used. - * The config also declares a type for the value that is validated by Redis and + * The config also declares a type for the value that is validated by the server and * provided to the module. The config system provides the following types: * - * * Redis String: Binary safe string data. + * * String: Binary safe string data. * * Enum: One of a finite number of string tokens, provided during registration. * * Numeric: 64 bit signed integer, which also supports min and max values. * * Bool: Yes or no value. @@ -12834,7 +12834,7 @@ unsigned int maskModuleEnumConfigFlags(unsigned int flags) { * The `setfn` callback is expected to return VALKEYMODULE_OK when the value is successfully * applied. 
It can also return VALKEYMODULE_ERR if the value can't be applied, and the * *err pointer can be set with a ValkeyModuleString error message to provide to the client. - * This ValkeyModuleString will be freed by redis after returning from the set callback. + * This ValkeyModuleString will be freed by the server after returning from the set callback. * * All configs are registered with a name, a type, a default value, private data that is made * available in the callbacks, as well as several flags that modify the behavior of the config. @@ -12918,8 +12918,8 @@ int VM_RegisterBoolConfig(ValkeyModuleCtx *ctx, const char *name, int default_va * Create an enum config that server clients can interact with via the * `CONFIG SET`, `CONFIG GET`, and `CONFIG REWRITE` commands. * Enum configs are a set of string tokens to corresponding integer values, where - * the string value is exposed to Redis clients but the value passed Redis and the - * module is the integer value. These values are defined in enum_values, an array + * the string value is exposed to clients but the integer value is passed to the server + * and the module. These values are defined in enum_values, an array * of null-terminated c strings, and int_vals, an array of enum values who has an * index partner in enum_values. * Example Implementation: @@ -13129,7 +13129,7 @@ int VM_RdbSave(ValkeyModuleCtx *ctx, ValkeyModuleRdbStream *stream, int flags) { return VALKEYMODULE_OK; } -/* Redis MODULE command. +/* MODULE command. * * MODULE LIST * MODULE LOAD [args...] @@ -13268,7 +13268,7 @@ int VM_GetLFU(ValkeyModuleKey *key, long long *lfu_freq) { /** * Returns the full module options flags mask, using the return value * the module can check if a certain set of module options are supported - * by the redis server version in use. + * by the server version in use. * Example: * * int supportedFlags = VM_GetModuleOptionsAll(); @@ -13285,7 +13285,7 @@ int VM_GetModuleOptionsAll(void) { /** * Returns the full ContextFlags mask, using the return value * the module can check if a certain set of flags are supported - * by the redis server version in use. + * by the server version in use. * Example: * * int supportedFlags = VM_GetContextFlagsAll(); @@ -13302,7 +13302,7 @@ int VM_GetContextFlagsAll(void) { /** * Returns the full KeyspaceNotification mask, using the return value * the module can check if a certain set of flags are supported - * by the redis server version in use. + * by the server version in use. * Example: * * int supportedFlags = VM_GetKeyspaceNotificationFlagsAll(); @@ -13317,7 +13317,7 @@ int VM_GetKeyspaceNotificationFlagsAll(void) { } /** - * Return the redis version in format of 0x00MMmmpp. + * Return the server version in format of 0x00MMmmpp. * Example for 6.0.7 the return value will be 0x00060007. */ int VM_GetServerVersion(void) { @@ -13325,7 +13325,7 @@ int VM_GetServerVersion(void) { } /** - * Return the current redis-server runtime value of VALKEYMODULE_TYPE_METHOD_VERSION. + * Return the current server runtime value of VALKEYMODULE_TYPE_METHOD_VERSION. * You can use that when calling VM_CreateDataType to know which fields of * ValkeyModuleTypeMethods are gonna be supported and which will be ignored. */ @@ -13382,7 +13382,7 @@ int VM_ModuleTypeReplaceValue(ValkeyModuleKey *key, moduleType *mt, void *new_va * * ENOENT: Specified command does not exist. * * EINVAL: Invalid command arity specified.
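A hedged sketch of the enum-config registration described above, assuming the getfn/setfn callback shapes implied by VM_RegisterEnumConfig; the log-level tokens and values are invented, and VM_LoadConfigs is called at the end of OnLoad so the registered values take effect.

#include "redismodule.h"

static const char *lvl_names[] = {"debug", "notice", "warning"};
static const int   lvl_vals[]  = {0, 1, 2};
static int log_level = 1;                     /* module-owned storage */

static int getLevel(const char *name, void *privdata) {
    VALKEYMODULE_NOT_USED(name); VALKEYMODULE_NOT_USED(privdata);
    return log_level;
}

static int setLevel(const char *name, int val, void *privdata, RedisModuleString **err) {
    VALKEYMODULE_NOT_USED(name); VALKEYMODULE_NOT_USED(privdata); VALKEYMODULE_NOT_USED(err);
    log_level = val;                          /* val is one of lvl_vals[] */
    return VALKEYMODULE_OK;
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "cfgdemo", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR)
        return VALKEYMODULE_ERR;
    /* Exposed to clients as "cfgdemo.log-level": clients see the string token,
     * the module receives the paired integer value. */
    if (RedisModule_RegisterEnumConfig(ctx, "log-level", 1, VALKEYMODULE_CONFIG_DEFAULT,
                                       lvl_names, lvl_vals, 3,
                                       getLevel, setLevel, NULL, NULL) == VALKEYMODULE_ERR)
        return VALKEYMODULE_ERR;
    return RedisModule_LoadConfigs(ctx);
}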
* - * NOTE: The returned array is not a Redis Module object so it does not + * NOTE: The returned array is not a Module object so it does not * get automatically freed even when auto-memory is used. The caller * must explicitly call VM_Free() to free it, same as the out_flags pointer if * used. @@ -13554,7 +13554,7 @@ void *VM_DefragAlloc(ValkeyModuleDefragCtx *ctx, void *ptr) { * Typically this means strings retained with VM_RetainString or VM_HoldString * may not be defragmentable. One exception is command argvs which, if retained * by the module, will end up with a single reference (because the reference - * on the Redis side is dropped as soon as the command callback returns). + * on the server side is dropped as soon as the command callback returns). */ ValkeyModuleString *VM_DefragValkeyModuleString(ValkeyModuleDefragCtx *ctx, ValkeyModuleString *str) { UNUSED(ctx); diff --git a/src/modules/helloacl.c b/src/modules/helloacl.c index a7660afea3..282f1e6a84 100644 --- a/src/modules/helloacl.c +++ b/src/modules/helloacl.c @@ -154,8 +154,8 @@ int AuthAsyncCommand_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, return VALKEYMODULE_OK; } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); diff --git a/src/modules/helloblock.c b/src/modules/helloblock.c index 8c036bead5..7fd79cd366 100644 --- a/src/modules/helloblock.c +++ b/src/modules/helloblock.c @@ -198,8 +198,8 @@ int HelloKeys_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int ar return VALKEYMODULE_OK; } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); diff --git a/src/modules/hellocluster.c b/src/modules/hellocluster.c index 7a70332ada..e2c7ac57a8 100644 --- a/src/modules/hellocluster.c +++ b/src/modules/hellocluster.c @@ -86,8 +86,8 @@ void PongReceiver(RedisModuleCtx *ctx, const char *sender_id, uint8_t type, cons type,VALKEYMODULE_NODE_ID_LEN,sender_id,(int)len, payload); } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); @@ -103,7 +103,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) ListCommand_RedisCommand,"readonly",0,0,0) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; - /* Disable Redis Cluster sharding and redirections. This way every node + /* Disable Cluster sharding and redirections. This way every node * will be able to access every possible key, regardless of the hash slot. * This way the PING message handler will be able to increment a specific * variable. 
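The hellocluster comment above (keep the Cluster message bus, drop sharding and failover) corresponds to a one-call setup in OnLoad. The VALKEYMODULE_CLUSTER_FLAG_* and context-flag names below are assumed from the API description earlier in this file; the module name is illustrative.

#include "redismodule.h"

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "busonly", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR)
        return VALKEYMODULE_ERR;
    /* Keep the Cluster message bus but opt out of failover and key
     * redirection, as hellocluster does. */
    if (RedisModule_GetContextFlags(ctx) & VALKEYMODULE_CTX_FLAGS_CLUSTER)
        RedisModule_SetClusterFlags(ctx, VALKEYMODULE_CLUSTER_FLAG_NO_FAILOVER |
                                         VALKEYMODULE_CLUSTER_FLAG_NO_REDIRECTION);
    return VALKEYMODULE_OK;
}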
Normally you do that in order for the distributed system diff --git a/src/modules/hellodict.c b/src/modules/hellodict.c index 6a6420b7d3..98b69fd4ea 100644 --- a/src/modules/hellodict.c +++ b/src/modules/hellodict.c @@ -1,7 +1,7 @@ /* Hellodict -- An example of modules dictionary API * * This module implements a volatile key-value store on top of the - * dictionary exported by the Redis modules API. + * dictionary exported by the modules API. * * ----------------------------------------------------------------------------- * @@ -103,8 +103,8 @@ int cmd_KEYRANGE(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { return VALKEYMODULE_OK; } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); diff --git a/src/modules/hellohook.c b/src/modules/hellohook.c index 4b675891a2..58e0995f66 100644 --- a/src/modules/hellohook.c +++ b/src/modules/hellohook.c @@ -75,8 +75,8 @@ void flushdbCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void } } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); diff --git a/src/modules/hellotimer.c b/src/modules/hellotimer.c index d0ea4e14a9..ae76f084d7 100644 --- a/src/modules/hellotimer.c +++ b/src/modules/hellotimer.c @@ -58,8 +58,8 @@ int TimerCommand_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int return RedisModule_ReplyWithSimpleString(ctx, "OK"); } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); diff --git a/src/modules/hellotype.c b/src/modules/hellotype.c index 31ca35f6f5..b0329c6dba 100644 --- a/src/modules/hellotype.c +++ b/src/modules/hellotype.c @@ -320,8 +320,8 @@ void HelloTypeDigest(RedisModuleDigest *md, void *value) { RedisModule_DigestEndSequence(md); } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc); diff --git a/src/modules/helloworld.c b/src/modules/helloworld.c index 658bcb3926..8ca9137328 100644 --- a/src/modules/helloworld.c +++ b/src/modules/helloworld.c @@ -1,4 +1,4 @@ -/* Helloworld module -- A few examples of the Redis Modules API in the form +/* Helloworld module -- A few examples of the Modules API in the form * of commands showing how to accomplish common tasks. * * This module does not do anything useful, if not for a few commands. 
The @@ -42,7 +42,7 @@ /* HELLO.SIMPLE is among the simplest commands you can implement. * It just returns the currently selected DB id, a functionality which is - * missing in Redis. The command uses two important API calls: one to + * missing in the server. The command uses two important API calls: one to * fetch the currently selected DB, the other in order to send the client * an integer reply as response. */ int HelloSimple_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { @@ -73,8 +73,8 @@ int HelloPushNative_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, } /* HELLO.PUSH.CALL implements RPUSH using an higher level approach, calling - * a Redis command instead of working with the key in a low level way. This - * approach is useful when you need to call Redis commands that are not + * a command instead of working with the key in a low level way. This + * approach is useful when you need to call commands that are not * available as low level APIs, or when you don't need the maximum speed * possible but instead prefer implementation simplicity. */ int HelloPushCall_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) @@ -106,7 +106,7 @@ int HelloPushCall2_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, i } /* HELLO.LIST.SUM.LEN returns the total length of all the items inside - * a Redis list, by using the high level Call() API. + * a list, by using the high level Call() API. * This command is an example of the array reply access. */ int HelloListSumLen_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { @@ -492,7 +492,7 @@ int HelloHCopy_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int a * This is an implementation of the infamous LEFTPAD function, that * was at the center of an issue with the npm modules system in March 2016. * - * LEFTPAD is a good example of using a Redis Modules API called + * LEFTPAD is a good example of using a Modules API called * "pool allocator", that was a famous way to allocate memory in yet another * open source project, the Apache web server. * @@ -540,8 +540,8 @@ int HelloLeftPad_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int return VALKEYMODULE_OK; } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { if (RedisModule_Init(ctx,"helloworld",1,VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR) return VALKEYMODULE_ERR; diff --git a/src/monotonic.c b/src/monotonic.c index a78d9d5f9e..d96bfb2b3b 100644 --- a/src/monotonic.c +++ b/src/monotonic.c @@ -12,7 +12,7 @@ static char monotonic_info_string[32]; /* Using the processor clock (aka TSC on x86) can provide improved performance - * throughout Redis wherever the monotonic clock is used. The processor clock + * throughout the server wherever the monotonic clock is used. The processor clock * is significantly faster than calling 'clock_gettime' (POSIX). 
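In the same spirit as HELLO.PUSH.CALL and HELLO.LIST.SUM.LEN above, a compact sketch that delegates to LLEN through the high-level Call() API and forwards the reply verbatim; the HELLO.LEN.CALL command and module names are made up for illustration.

#include "redismodule.h"

/* HELLO.LEN.CALL <key> -- reply with the length of the list at <key>,
 * implemented by delegating to the LLEN command via Call(). */
int HelloLenCall_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 2) return RedisModule_WrongArity(ctx);
    RedisModuleCallReply *reply = RedisModule_Call(ctx, "LLEN", "s", argv[1]);
    if (reply == NULL) return RedisModule_ReplyWithError(ctx, "ERR LLEN call failed");
    RedisModule_ReplyWithCallReply(ctx, reply);   /* pass the reply through as-is */
    RedisModule_FreeCallReply(reply);
    return VALKEYMODULE_OK;
}

int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    VALKEYMODULE_NOT_USED(argv); VALKEYMODULE_NOT_USED(argc);
    if (RedisModule_Init(ctx, "hellolen", 1, VALKEYMODULE_APIVER_1) == VALKEYMODULE_ERR)
        return VALKEYMODULE_ERR;
    return RedisModule_CreateCommand(ctx, "hello.len.call",
                                     HelloLenCall_RedisCommand, "readonly", 1, 1, 1);
}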
While this is * generally safe on modern systems, this link provides additional information * about use of the x86 TSC: http://oliveryang.net/2015/09/pitfalls-of-TSC-usage diff --git a/src/networking.c b/src/networking.c index c5ea529cdd..1a6c88aea6 100644 --- a/src/networking.c +++ b/src/networking.c @@ -473,7 +473,7 @@ void addReplySds(client *c, sds s) { * client buffer, trying the static buffer initially, and using the string * of objects if not possible. * - * It is efficient because does not create an SDS object nor an Redis object + * It is efficient because does not create an SDS object nor an Object * if not needed. The object will only be created by calling * _addReplyProtoToList() if we fail to extend the existing tail object * in the list of objects. */ @@ -483,7 +483,7 @@ void addReplyProto(client *c, const char *s, size_t len) { } /* Low level function called by the addReplyError...() functions. - * It emits the protocol for a Redis error, in the form: + * It emits the protocol for an error reply, in the form: * * -ERRORCODE Error Message * @@ -550,7 +550,7 @@ void afterErrorReply(client *c, const char *s, size_t len, int flags) { * * Where the master must propagate the first change even if the second * will produce an error. However it is useful to log such events since - * they are rare and may hint at errors in a script or a bug in Redis. */ + * they are rare and may hint at errors in a script or a bug in the server. */ int ctype = getClientType(c); if (ctype == CLIENT_TYPE_MASTER || ctype == CLIENT_TYPE_SLAVE || c->id == CLIENT_ID_AOF) { char *to, *from; @@ -1049,7 +1049,7 @@ void addReplyBulkLen(client *c, robj *obj) { addReplyLongLongWithPrefix(c,len,'$'); } -/* Add a Redis Object as a bulk reply */ +/* Add an Object as a bulk reply */ void addReplyBulk(client *c, robj *obj) { addReplyBulkLen(c,obj); addReply(c,obj); @@ -1128,7 +1128,7 @@ void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext) { /* This function is similar to the addReplyHelp function but adds the * ability to pass in two arrays of strings. Some commands have * some additional subcommands based on the specific feature implementation - * Redis is compiled with (currently just clustering). This function allows + * the server is compiled with (currently just clustering). This function allows * to pass is the common subcommands in `help` and any implementation * specific subcommands in `extended_help`. */ @@ -1739,7 +1739,7 @@ void freeClient(client *c) { void freeClientAsync(client *c) { /* We need to handle concurrent access to the server.clients_to_close list * only in the freeClientAsync() function, since it's the only function that - * may access the list while Redis uses I/O threads. All the other accesses + * may access the list while the server uses I/O threads. All the other accesses * are in the context of the main thread while the other threads are * idle. */ if (c->flags & CLIENT_CLOSE_ASAP || c->flags & CLIENT_SCRIPT) return; @@ -2226,7 +2226,7 @@ int processInlineBuffer(client *c) { c->repl_ack_time = server.unixtime; /* Masters should never send us inline protocol to run actual - * commands. If this happens, it is likely due to a bug in Redis where + * commands. If this happens, it is likely due to a bug in the server where * we got some desynchronization in the protocol, for example * because of a PSYNC gone bad. * @@ -2250,7 +2250,7 @@ int processInlineBuffer(client *c) { c->argv_len_sum = 0; } - /* Create redis objects for all arguments. 
*/ + /* Create an Object for each argument. */ for (c->argc = 0, j = 0; j < argc; j++) { c->argv[c->argc] = createObject(OBJ_STRING,argv[j]); c->argc++; @@ -2688,7 +2688,7 @@ void readQueryFromClient(connection *conn) { * buffer contains exactly the SDS string representing the object, even * at the risk of requiring more read(2) calls. This way the function * processMultiBulkBuffer() can avoid copying buffers to create the - * Redis Object representing the argument. */ + * robj representing the argument. */ if (c->reqtype == PROTO_REQ_MULTIBULK && c->multibulklen && c->bulklen != -1 && c->bulklen >= PROTO_MBULK_BIG_ARG) { @@ -2784,7 +2784,7 @@ void readQueryFromClient(connection *conn) { beforeNextClient(c); } -/* A Redis "Address String" is a colon separated ip:port pair. +/* An "Address String" is a colon separated ip:port pair. * For IPv4 it's in the form x.y.z.k:port, example: "127.0.0.1:1234". * For IPv6 addresses we use [] around the IP part, like in "[::1]:1234". * For Unix sockets we use path:0, like in "/tmp/redis:0". @@ -3711,11 +3711,11 @@ void helloCommand(client *c) { /* This callback is bound to POST and "Host:" command names. Those are not * really commands, but are used in security attacks in order to talk to - * Redis instances via HTTP, with a technique called "cross protocol scripting" - * which exploits the fact that services like Redis will discard invalid + * instances via HTTP, with a technique called "cross protocol scripting" + * which exploits the fact that services like this server will discard invalid * HTTP headers and will process what follows. * - * As a protection against this attack, Redis will terminate the connection + * As a protection against this attack, the server will terminate the connection * when a POST or "Host:" header is seen, and will log the event from * time to time (to avoid creating a DOS as a result of too many logs). */ void securityWarningCommand(client *c) { @@ -3837,7 +3837,7 @@ void rewriteClientCommandArgument(client *c, int i, robj *newval) { } } -/* This function returns the number of bytes that Redis is +/* This function returns the number of bytes that the server is * using to store the reply still not read by the client. * * Note: this function is very fast so can be called as many time as @@ -4122,7 +4122,7 @@ static void pauseClientsByClient(mstime_t endTime, int isPauseClientAll) { * so that a failover without data loss to occur. Replicas will continue to receive * traffic to facilitate this functionality. * - * This function is also internally used by Redis Cluster for the manual + * This function is also internally used by Cluster for the manual * failover procedure implemented by CLUSTER FAILOVER. * * The function always succeed, even if there is already a pause in progress. @@ -4166,7 +4166,7 @@ uint32_t isPausedActionsWithUpdate(uint32_t actions_bitmask) { return (server.paused_actions & actions_bitmask); } -/* This function is called by Redis in order to process a few events from +/* This function is called by the server in order to process a few events from * time to time while blocked into some not interruptible operation.
* This allows to reply to clients with the -LOADING error while loading the * data set at startup or after a full resynchronization with the master diff --git a/src/notify.c b/src/notify.c index 2881a48dba..3bf6d5fc62 100644 --- a/src/notify.c +++ b/src/notify.c @@ -93,13 +93,13 @@ sds keyspaceEventsFlagsToString(int flags) { return res; } -/* The API provided to the rest of the Redis core is a simple function: +/* The API provided to the rest of the server core is a simple function: * * notifyKeyspaceEvent(int type, char *event, robj *key, int dbid); * * 'type' is the notification class we define in `server.h`. * 'event' is a C string representing the event name. - * 'key' is a Redis object representing the key name. + * 'key' is an Object representing the key name. * 'dbid' is the database ID where the key lives. */ void notifyKeyspaceEvent(int type, char *event, robj *key, int dbid) { sds chan; diff --git a/src/object.c b/src/object.c index 8c00adbcba..a67f083a4b 100644 --- a/src/object.c +++ b/src/object.c @@ -1,4 +1,4 @@ -/* Redis Object implementation. +/* Object implementation. * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. @@ -173,7 +173,7 @@ robj *createStringObjectFromLongLong(long long value) { /* The function avoids returning a shared integer when LFU/LRU info * are needed, that is, when the object is used as a value in the key - * space(for instance when the INCR command is used), and Redis is + * space(for instance when the INCR command is used), and the server is * configured to evict based on LFU/LRU, so we want LFU/LRU values * specific for each key. */ robj *createStringObjectFromLongLongForValue(long long value) { @@ -649,7 +649,7 @@ robj *tryObjectEncodingEx(robj *o, int try_trim) { if (!sdsEncodedObject(o)) return o; /* It's not safe to encode shared objects: shared objects can be shared - * everywhere in the "object space" of Redis and may end in places where + * everywhere in the "object space" of the server and may end in places where * they are not handled. We handle them only as values in the keyspace. */ if (o->refcount > 1) return o; @@ -1296,7 +1296,7 @@ void inputCatSds(void *result, const char *str) { *info = sdscat(*info, str); } -/* This implements MEMORY DOCTOR. An human readable analysis of the Redis +/* This implements MEMORY DOCTOR. A human readable analysis of the server * memory condition. */ sds getMemoryDoctorReport(void) { int empty = 0; /* Instance is empty or almost empty. */ @@ -1425,7 +1425,7 @@ int objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle, } } else if (lru_idle >= 0) { /* Provided LRU idle time is in seconds. Scale - * according to the LRU clock resolution this Redis + * according to the LRU clock resolution this * instance was compiled with (normally 1000 ms, so the * below statement will expand to lru_idle*1000/1000. */ lru_idle = lru_idle*lru_multiplier/LRU_CLOCK_RESOLUTION; @@ -1457,7 +1457,7 @@ robj *objectCommandLookupOrReply(client *c, robj *key, robj *reply) { return o; } -/* Object command allows to inspect the internals of a Redis Object. +/* Object command allows to inspect the internals of an Object. * Usage: OBJECT */ void objectCommand(client *c) { robj *o; @@ -1513,7 +1513,7 @@ NULL } /* The memory command will eventually be a complete interface for the - * memory introspection capabilities of Redis. + * memory introspection capabilities of the server.
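To show how the notifyKeyspaceEvent() entry point above is used from the server side, here is a hypothetical command that mirrors what DEL does: delete, signal, notify, reply. The command itself is invented and would still need a command-table entry; the helpers it calls are the existing ones.

#include "server.h"   /* notifyKeyspaceEvent(), NOTIFY_* classes, client, robj */

/* Hypothetical command: delete one key and fire the same "del" event the
 * built-in DEL command fires, so keyspace/keyevent subscribers see it. */
void myDelCommand(client *c) {
    robj *key = c->argv[1];
    if (dbDelete(c->db, key)) {
        signalModifiedKey(c, c->db, key);                 /* WATCH/dirty tracking */
        notifyKeyspaceEvent(NOTIFY_GENERIC, "del", key, c->db->id);
        server.dirty++;
        addReply(c, shared.cone);
    } else {
        addReply(c, shared.czero);
    }
}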
* * Usage: MEMORY usage */ void memoryCommand(client *c) { diff --git a/src/pqsort.c b/src/pqsort.c index fab54e026a..ca4f99359d 100644 --- a/src/pqsort.c +++ b/src/pqsort.c @@ -1,5 +1,5 @@ /* The following is the NetBSD libc qsort implementation modified in order to - * support partial sorting of ranges for Redis. + * support partial sorting of ranges. * * Copyright(C) 2009-2012 Salvatore Sanfilippo. All rights reserved. * diff --git a/src/pqsort.h b/src/pqsort.h index 824ab5c096..5f1c5c1cd3 100644 --- a/src/pqsort.h +++ b/src/pqsort.h @@ -1,5 +1,5 @@ /* The following is the NetBSD libc qsort implementation modified in order to - * support partial sorting of ranges for Redis. + * support partial sorting of ranges. * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. diff --git a/src/pubsub.c b/src/pubsub.c index b6db719b66..1fcad2565b 100644 --- a/src/pubsub.c +++ b/src/pubsub.c @@ -100,7 +100,7 @@ pubsubtype pubSubShardType = { *----------------------------------------------------------------------------*/ /* Send a pubsub message of type "message" to the client. - * Normally 'msg' is a Redis object containing the string to send as + * Normally 'msg' is an Object containing the string to send as * message. However if the caller sets 'msg' as NULL, it will be able * to send a special message (for instance an Array type) by using the * addReply*() API family. */ @@ -315,7 +315,7 @@ int pubsubUnsubscribeChannel(client *c, robj *channel, int notify, pubsubtype ty if (dictSize(clients) == 0) { /* Free the dict and associated hash entry at all if this was * the latest client, so that it will be possible to abuse - * Redis PUBSUB creating millions of channels. */ + * PUBSUB creating millions of channels. */ kvstoreDictDelete(*type.serverPubSubChannels, slot, channel); } } diff --git a/src/rax.h b/src/rax.h index c58c28b2c6..3604057d6f 100644 --- a/src/rax.h +++ b/src/rax.h @@ -159,7 +159,7 @@ typedef struct raxStack { * This callback is used to perform very low level analysis of the radix tree * structure, scanning each possible node (but the root node), or in order to * reallocate the nodes to reduce the allocation fragmentation (this is the - * Redis application for this callback). + * server's application for this callback). * * This is currently only supported in forward iterations (raxNext) */ typedef int (*raxNodeCallback)(raxNode **noderef); diff --git a/src/rdb.c b/src/rdb.c index 5a63e66384..447432cc3e 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -127,7 +127,7 @@ int rdbLoadType(rio *rdb) { } /* This is only used to load old databases stored with the RDB_OPCODE_EXPIRETIME - * opcode. New versions of Redis store using the RDB_OPCODE_EXPIRETIME_MS + * opcode. New versions of the server store using the RDB_OPCODE_EXPIRETIME_MS * opcode. On error -1 is returned, however this could be a valid time, so * to check for loading errors the caller should call rioGetReadError() after * calling this function. */ @@ -144,13 +144,13 @@ ssize_t rdbSaveMillisecondTime(rio *rdb, long long t) { } /* This function loads a time from the RDB file. It gets the version of the - * RDB because, unfortunately, before Redis 5 (RDB version 9), the function + * RDB because, unfortunately, before Redis OSS 5 (RDB version 9), the function * failed to convert data to/from little endian, so RDB files with keys having * expires could not be shared between big endian and little endian systems * (because the expire time will be totally wrong). 
The fix for this is just * to call memrev64ifbe(), however if we fix this for all the RDB versions, * this call will introduce an incompatibility for big endian systems: - * after upgrading to Redis version 5 they will no longer be able to load their + * after upgrading to Redis OSS version 5 they will no longer be able to load their * own old RDB files. Because of that, we instead fix the function only for new * RDB versions, and load older RDB versions as we used to do in the past, * allowing big endian systems to load their own old RDB files. @@ -250,7 +250,7 @@ int rdbLoadLenByRef(rio *rdb, int *isencoded, uint64_t *lenptr) { /* This is like rdbLoadLenByRef() but directly returns the value read * from the RDB stream, signaling an error by returning RDB_LENERR - * (since it is a too large count to be applicable in any Redis data + * (since it is a too large count to be applicable in any server data * structure). */ uint64_t rdbLoadLen(rio *rdb, int *isencoded) { uint64_t len; @@ -490,7 +490,7 @@ ssize_t rdbSaveLongLongAsStringObject(rio *rdb, long long value) { return nwritten; } -/* Like rdbSaveRawString() gets a Redis object instead. */ +/* Like rdbSaveRawString() gets an Object instead. */ ssize_t rdbSaveStringObject(rio *rdb, robj *obj) { /* Avoid to decode the object, then encode it again, if the * object is already integer encoded. */ @@ -505,13 +505,13 @@ ssize_t rdbSaveStringObject(rio *rdb, robj *obj) { /* Load a string object from an RDB file according to flags: * * RDB_LOAD_NONE (no flags): load an RDB object, unencoded. - * RDB_LOAD_ENC: If the returned type is a Redis object, try to + * RDB_LOAD_ENC: If the returned type is an Object, try to * encode it in a special way to be more memory * efficient. When this flag is passed the function * no longer guarantees that obj->ptr is an SDS string. * RDB_LOAD_PLAIN: Return a plain string allocated with zmalloc() - * instead of a Redis object with an sds in it. - * RDB_LOAD_SDS: Return an SDS string instead of a Redis object. + * instead of an Object with an sds in it. + * RDB_LOAD_SDS: Return an SDS string instead of an Object. * * On I/O error NULL is returned. */ @@ -809,7 +809,7 @@ size_t rdbSaveStreamConsumers(rio *rdb, streamCG *cg) { return nwritten; } -/* Save a Redis object. +/* Save an Object. * Returns -1 on error, number of bytes written on success. */ ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key, int dbid) { ssize_t n = 0, nwritten = 0; @@ -1377,7 +1377,7 @@ ssize_t rdbSaveDb(rio *rdb, int dbid, int rdbflags, long *key_counter) { } /* Produces a dump of the database in RDB format sending it to the specified - * Redis I/O channel. On success C_OK is returned, otherwise C_ERR + * I/O channel. On success C_OK is returned, otherwise C_ERR * is returned and part of the output, or all the output, can be * missing because of I/O errors. * @@ -1632,7 +1632,7 @@ void rdbRemoveTempFile(pid_t childpid, int from_signal) { /* This function is called by rdbLoadObject() when the code is in RDB-check * mode and we find a module value of type 2 that can be parsed without * the need of the actual module. The value is parsed for errors, finally - * a dummy redis object is returned just to conform to the API. */ + * a dummy Object is returned just to conform to the API. 
*/ robj *rdbLoadCheckModuleValue(rio *rdb, char *modulename) { uint64_t opcode; while((opcode = rdbLoadLen(rdb,NULL)) != RDB_MODULE_OPCODE_EOF) { @@ -1830,7 +1830,7 @@ int lpValidateIntegrityAndDups(unsigned char *lp, size_t size, int deep, int pai return ret; } -/* Load a Redis object of the specified type from the specified file. +/* Load an Object of the specified type from the specified file. * On success a newly allocated object is returned, otherwise NULL. * When the function returns NULL and if 'error' is not NULL, the * integer pointed by 'error' is set to the type of error that occurred */ @@ -2277,7 +2277,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) { return NULL; } /* Convert to ziplist encoded hash. This must be deprecated - * when loading dumps created by Redis 2.4 gets deprecated. */ + * when loading dumps created by Redis OSS 2.4 gets deprecated. */ { unsigned char *lp = lpNew(0); unsigned char *zi = zipmapRewind(o->ptr); @@ -3196,7 +3196,7 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin decrRefCount(auxval); continue; /* Read type again. */ } else if (type == RDB_OPCODE_MODULE_AUX) { - /* Load module data that is not related to the Redis key space. + /* Load module data that is not related to the server key space. * Such data can be potentially be stored both before and after the * RDB keys-values section. */ uint64_t moduleid = rdbLoadLen(rdb,NULL); @@ -3391,7 +3391,7 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin return C_OK; /* Unexpected end of file is handled here calling rdbReportReadError(): - * this will in turn either abort Redis in most cases, or if we are loading + * this will in turn either abort the server in most cases, or if we are loading * the RDB file from a socket during initial SYNC (diskless replica mode), * we'll report the error to the caller, so that we can retry. */ eoferr: diff --git a/src/redismodule.h b/src/redismodule.h index 309ab312d3..9c3d06d69d 100644 --- a/src/redismodule.h +++ b/src/redismodule.h @@ -712,7 +712,7 @@ #define RedisModuleEvent_Shutdown ValkeyModuleEvent_Shutdown #define RedisModuleEvent_ReplicaChange ValkeyModuleEvent_ReplicaChange #define RedisModuleEvent_CronLoop ValkeyModuleEvent_CronLoop -#define RedisModuleEvent_MasterLinkChange ValkeyModuleEvent_MasterLinkChange +#define RedisModuleEvent_PrimaryLinkChange ValkeyModuleEvent_PrimaryLinkChange #define RedisModuleEvent_ModuleChange ValkeyModuleEvent_ModuleChange #define RedisModuleEvent_LoadingProgress ValkeyModuleEvent_LoadingProgress #define RedisModuleEvent_SwapDB ValkeyModuleEvent_SwapDB diff --git a/src/release.c b/src/release.c index fdc1dbd5b2..a8b6fa188f 100644 --- a/src/release.c +++ b/src/release.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -/* Every time the Redis Git SHA1 or Dirty status changes only this small +/* Every time the Git SHA1 or Dirty status changes only this small * file is recompiled, as we access this information in all the other * files using this functions. */ diff --git a/src/replication.c b/src/replication.c index 49a8455985..323d065221 100644 --- a/src/replication.c +++ b/src/replication.c @@ -310,7 +310,7 @@ int prepareReplicasToWrite(void) { return prepared; } -/* Wrapper for feedReplicationBuffer() that takes Redis string objects +/* Wrapper for feedReplicationBuffer() that takes string Objects * as input. 
*/ void feedReplicationBufferWithObject(robj *o) { char llstr[LONG_STR_SIZE]; @@ -1128,7 +1128,7 @@ void syncCommand(client *c) { } } else { /* If a slave uses SYNC, we are dealing with an old implementation - * of the replication protocol (like redis-cli --slave). Flag the client + * of the replication protocol (like valkey-cli --slave). Flag the client * so that we don't expect to receive REPLCONF ACK feedbacks. */ c->flags |= CLIENT_PRE_PSYNC; } @@ -1240,7 +1240,7 @@ void syncCommand(client *c) { * * - listening-port * - ip-address - * What is the listening ip and port of the Replica redis instance, so that + * What is the listening ip and port of the Replica instance, so that * the master can accurately lists replicas and their listening ports in the * INFO output. * @@ -2194,7 +2194,7 @@ void readSyncBulkPayload(connection *conn) { /* We reach this point in one of the following cases: * * 1. The replica is using diskless replication, that is, it reads data - * directly from the socket to the Redis memory, without using + * directly from the socket to the server memory, without using * a temporary RDB file on disk. In that case we just block and * read everything from the socket. * @@ -2511,7 +2511,7 @@ char *sendCommand(connection *conn, ...) { size_t argslen = 0; char *arg; - /* Create the command to send to the master, we use redis binary + /* Create the command to send to the master, we use binary * protocol to make sure correct arguments are sent. This function * is not safe for all binary data. */ va_start(ap,conn); @@ -3347,7 +3347,7 @@ void syncWithMaster(connection *conn) { /* We accept only two replies as valid, a positive +PONG reply * (we just check for "+") or an authentication error. - * Note that older versions of Redis replied with "operation not + * Note that older versions of Redis OSS replied with "operation not * permitted" instead of using a proper error code, so we test * both. */ if (err[0] != '+' && @@ -3447,7 +3447,7 @@ void syncWithMaster(connection *conn) { if (server.repl_state == REPL_STATE_RECEIVE_PORT_REPLY) { err = receiveSynchronousResponse(conn); if (err == NULL) goto no_response_error; - /* Ignore the error if any, not all the Redis versions support + /* Ignore the error if any, not all the Redis OSS versions support * REPLCONF listening-port. */ if (err[0] == '-') { serverLog(LL_NOTICE,"(Non critical) Master does not understand " @@ -3465,7 +3465,7 @@ void syncWithMaster(connection *conn) { if (server.repl_state == REPL_STATE_RECEIVE_IP_REPLY) { err = receiveSynchronousResponse(conn); if (err == NULL) goto no_response_error; - /* Ignore the error if any, not all the Redis versions support + /* Ignore the error if any, not all the Redis OSS versions support * REPLCONF ip-address. */ if (err[0] == '-') { serverLog(LL_NOTICE,"(Non critical) Master does not understand " @@ -3503,7 +3503,7 @@ void syncWithMaster(connection *conn) { if (server.repl_state == REPL_STATE_RECEIVE_CAPA_REPLY) { err = receiveSynchronousResponse(conn); if (err == NULL) goto no_response_error; - /* Ignore the error if any, not all the Redis versions support + /* Ignore the error if any, not all the Redis OSS versions support * REPLCONF capa. 
*/ if (err[0] == '-') { serverLog(LL_NOTICE,"(Non critical) Master does not understand " @@ -4233,9 +4233,9 @@ int checkGoodReplicasStatus(void) { } /* ----------------------- SYNCHRONOUS REPLICATION -------------------------- - * Redis synchronous replication design can be summarized in points: + * Synchronous replication design can be summarized in points: * - * - Redis masters have a global replication offset, used by PSYNC. + * - Masters have a global replication offset, used by PSYNC. * - Master increment the offset every time new commands are sent to slaves. * - Slaves ping back masters with the offset processed so far. * @@ -4545,7 +4545,7 @@ void replicationCron(void) { listLength(server.slaves)) { /* Note that we don't send the PING if the clients are paused during - * a Redis Cluster manual failover: the PING we send will otherwise + * a Cluster manual failover: the PING we send will otherwise * alter the replication offsets of master and slave, and will no longer * match the one stored into 'mf_master_offset' state. */ int manual_failover_in_progress = @@ -4663,7 +4663,7 @@ void replicationCron(void) { replicationStartPendingFork(); - /* Remove the RDB file used for replication if Redis is not running + /* Remove the RDB file used for replication if the server is not running * with any persistence. */ removeRDBUsedToSyncReplicas(); diff --git a/src/resp_parser.c b/src/resp_parser.c index b92a74cffb..df96d78a84 100644 --- a/src/resp_parser.c +++ b/src/resp_parser.c @@ -49,7 +49,7 @@ * time of parsing. Callers may calculate it themselves after parsing the * entire collection. * - * NOTE: This parser is designed to only handle replies generated by Redis + * NOTE: This parser is designed to only handle replies generated by the server * itself. It does not perform many required validations and thus NOT SAFE FOR * PARSING USER INPUT. * ---------------------------------------------------------------------------------------- diff --git a/src/rio.c b/src/rio.c index 9546fec41f..a86f3d44de 100644 --- a/src/rio.c +++ b/src/rio.c @@ -477,7 +477,7 @@ uint8_t rioCheckType(rio *r) { /* --------------------------- Higher level interface -------------------------- * * The following higher level functions use lower level rio.c functions to help - * generating the Redis protocol for the Append Only File. */ + * generating the RESP for the Append Only File. */ /* Write multi bulk count in the format: "*\r\n". */ size_t rioWriteBulkCount(rio *r, char prefix, long count) { diff --git a/src/script.c b/src/script.c index fd7c616717..db42e2f455 100644 --- a/src/script.c +++ b/src/script.c @@ -424,7 +424,7 @@ static int scriptVerifyClusterState(scriptRunCtx *run_ctx, client *c, client *or if (!server.cluster_enabled || mustObeyClient(original_c)) { return C_OK; } - /* If this is a Redis Cluster node, we need to make sure the script is not + /* If this is a Cluster node, we need to make sure the script is not * trying to access non-local keys, with the exception of commands * received from our master or when loading the AOF back in memory. */ int error_code; @@ -527,7 +527,7 @@ static int scriptVerifyAllowStale(client *c, sds *err) { return C_ERR; } -/* Call a Redis command. +/* Call a server command. * The reply is written to the run_ctx client and it is * up to the engine to take and parse. * The err out variable is set only if error occurs and describe the error. 
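The rio "higher level interface" mentioned above is what the AOF code uses to emit commands in RESP form. A hedged sketch, using only the buffer-backed rio and the rioWriteBulk* helpers from rio.c/rio.h, of how a three-argument SET would be serialized; the surrounding function and ownership convention are illustrative.

#include <string.h>
#include "server.h"
#include "rio.h"

/* Serialize "SET <key> <value>" the way the AOF rewrite code does:
 * a multi-bulk header followed by one bulk string per argument. */
sds aofEncodeSetExample(const char *key, const char *val) {
    rio r;
    rioInitWithBuffer(&r, sdsempty());
    if (!rioWriteBulkCount(&r, '*', 3) ||
        !rioWriteBulkString(&r, "SET", 3) ||
        !rioWriteBulkString(&r, key, strlen(key)) ||
        !rioWriteBulkString(&r, val, strlen(val))) {
        sdsfree(r.io.buffer.ptr);
        return NULL;
    }
    return r.io.buffer.ptr;   /* caller owns the "*3\r\n$3\r\nSET\r\n..." buffer */
}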
diff --git a/src/script.h b/src/script.h index caf95ef959..307eb41bd0 100644 --- a/src/script.h +++ b/src/script.h @@ -32,9 +32,9 @@ /* * Script.c unit provides an API for functions and eval - * to interact with Redis. Interaction includes mostly + * to interact with the server. Interaction includes mostly * executing commands, but also functionalities like calling - * Redis back on long scripts or check if the script was killed. + * the server back on long scripts or check if the script was killed. * * The interaction is done using a scriptRunCtx object that * need to be created by the user and initialized using scriptPrepareForRun. @@ -44,7 +44,7 @@ * acl, cluster, read only run, ...) * 2. Set Resp * 3. Set Replication method (AOF/REPLICATION/NONE) - * 4. Call Redis back to on long running scripts to allow Redis reply + * 4. Call the server back to on long running scripts to allow the server reply * to clients and perform script kill */ diff --git a/src/script_lua.c b/src/script_lua.c index fbfc73eb9f..24784c1d85 100644 --- a/src/script_lua.c +++ b/src/script_lua.c @@ -55,7 +55,7 @@ static char *libraries_allow_list[] = { NULL, }; -/* Redis Lua API globals */ +/* Lua API globals */ static char *redis_api_allow_list[] = { SERVER_API_NAME, REDIS_API_NAME, @@ -192,21 +192,21 @@ void* luaGetFromRegistry(lua_State* lua, const char* name) { } /* --------------------------------------------------------------------------- - * Redis reply to Lua type conversion functions. + * Server reply to Lua type conversion functions. * ------------------------------------------------------------------------- */ -/* Take a Redis reply in the Redis protocol format and convert it into a +/* Take a server reply in the RESP format and convert it into a * Lua type. Thanks to this function, and the introduction of not connected * clients, it is trivial to implement the redis() lua function. * - * Basically we take the arguments, execute the Redis command in the context + * Basically we take the arguments, execute the command in the context * of a non connected client, then take the generated reply and convert it * into a suitable Lua type. With this trick the scripting feature does not - * need the introduction of a full Redis internals API. The script + * need the introduction of a full server internals API. The script * is like a normal client that bypasses all the slow I/O paths. * * Note: in this function we do not do any sanity check as the reply is - * generated by Redis directly. This allows us to go faster. + * generated by the server directly. This allows us to go faster. * * Errors are returned as a table with a single 'err' field set to the * error string. @@ -536,7 +536,7 @@ void luaPushErrorBuff(lua_State *lua, sds err_buffer) { sds error_code; /* If debugging is active and in step mode, log errors resulting from - * Redis commands. */ + * server commands. */ if (ldbIsEnabled()) { ldbLog(sdscatprintf(sdsempty()," %s",err_buffer)); } @@ -563,8 +563,8 @@ void luaPushErrorBuff(lua_State *lua, sds err_buffer) { msg = err_buffer; error_code = sdsnew("ERR"); } - /* Trim newline at end of string. If we reuse the ready-made Redis error objects (case 1 above) then we might - * have a newline that needs to be trimmed. In any case the lua Redis error table shouldn't end with a newline. */ + /* Trim newline at end of string. If we reuse the ready-made error objects (case 1 above) then we might + * have a newline that needs to be trimmed. In any case the lua server error table shouldn't end with a newline. 
*/ msg = sdstrim(msg, "\r\n"); sds final_msg = sdscatfmt(error_code, " %s", msg); @@ -591,11 +591,11 @@ int luaError(lua_State *lua) { /* --------------------------------------------------------------------------- - * Lua reply to Redis reply conversion functions. + * Lua reply to server reply conversion functions. * ------------------------------------------------------------------------- */ /* Reply to client 'c' converting the top element in the Lua stack to a - * Redis reply. As a side effect the element is consumed from the stack. */ + * server reply. As a side effect the element is consumed from the stack. */ static void luaReplyToRedisReply(client *c, client* script_client, lua_State *lua) { int t = lua_type(lua,-1); @@ -953,7 +953,7 @@ static int luaRedisGenericCommand(lua_State *lua, int raise_error) { goto cleanup; } - /* Convert the result of the Redis command into a suitable Lua type. + /* Convert the result of the command into a suitable Lua type. * The first thing we need is to create a single string from the client * output buffers. */ if (listLength(c->reply) == 0 && (size_t)c->bufpos < c->buf_usable_size) { @@ -976,7 +976,7 @@ static int luaRedisGenericCommand(lua_State *lua, int raise_error) { if (raise_error && reply[0] != '-') raise_error = 0; redisProtocolToLuaType(lua,reply); - /* If the debugger is active, log the reply from Redis. */ + /* If the debugger is active, log the reply from the server. */ if (ldbIsEnabled()) ldbLogRedisReply(reply); @@ -1004,9 +1004,9 @@ static int luaRedisGenericCommand(lua_State *lua, int raise_error) { /* Our implementation to lua pcall. * We need this implementation for backward - * comparability with older Redis versions. + * comparability with older Redis OSS versions. * - * On Redis 7, the error object is a table, + * On Redis OSS 7, the error object is a table, * compare to older version where the error * object is a string. To keep backward * comparability we catch the table object @@ -1062,7 +1062,7 @@ static int luaRedisSha1hexCommand(lua_State *lua) { /* Returns a table with a single field 'field' set to the string value * passed as argument. This helper function is handy when returning - * a Redis Protocol error or status reply from Lua: + * a RESP error or status reply from Lua: * * return redis.error_reply("ERR Some Error") * return redis.status_reply("ERR Some Error") @@ -1440,7 +1440,7 @@ void luaRegisterRedisAPI(lua_State* lua) { lua_pushcfunction(lua,luaRedisPcall); lua_setglobal(lua, "pcall"); - /* Register the redis commands table and fields */ + /* Register the commands table and fields */ lua_newtable(lua); /* redis.call */ @@ -1527,7 +1527,7 @@ void luaRegisterRedisAPI(lua_State* lua) { lua_setglobal(lua,"math"); } -/* Set an array of Redis String Objects as a Lua array (table) stored into a +/* Set an array of String Objects as a Lua array (table) stored into a * global variable. 
*/ static void luaCreateArray(lua_State *lua, robj **elev, int elec) { int j; @@ -1540,7 +1540,7 @@ static void luaCreateArray(lua_State *lua, robj **elev, int elec) { } /* --------------------------------------------------------------------------- - * Redis provided math.random + * Custom provided math.random * ------------------------------------------------------------------------- */ /* We replace math.random() with our implementation that is not affected @@ -1735,7 +1735,7 @@ void luaCallFunction(scriptRunCtx* run_ctx, lua_State *lua, robj** keys, size_t } lua_pop(lua,1); /* Consume the Lua error */ } else { - /* On success convert the Lua return value into Redis protocol, and + /* On success convert the Lua return value into RESP, and * send it to * the client. */ luaReplyToRedisReply(c, run_ctx->c, lua); /* Convert and consume the reply. */ } diff --git a/src/script_lua.h b/src/script_lua.h index 07446725d3..73210e6b36 100644 --- a/src/script_lua.h +++ b/src/script_lua.h @@ -38,10 +38,10 @@ * the top of the Lua stack. In addition, parsing the execution * result and convert it to the resp and reply to the client. * - * * Run Redis commands from within the Lua code (Including + * * Run server commands from within the Lua code (Including * parsing the reply and create a Lua object out of it). * - * * Register Redis API to the Lua interpreter. Only shared + * * Register the server API to the Lua interpreter. Only shared * API are registered (API that is only relevant on eval.c * (like debugging) are registered on eval.c). * diff --git a/src/sentinel.c b/src/sentinel.c index 24e8c3ded9..ad2ebdd510 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1,4 +1,4 @@ -/* Redis Sentinel implementation +/* Sentinel implementation * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. @@ -60,7 +60,7 @@ typedef struct sentinelAddr { int port; } sentinelAddr; -/* A Sentinel Redis Instance object is monitoring. */ +/* A Sentinel Instance object is monitoring. */ #define SRI_MASTER (1<<0) #define SRI_SLAVE (1<<1) #define SRI_SENTINEL (1<<2) @@ -477,7 +477,7 @@ const char *preMonitorCfgName[] = { "announce-hostnames" }; -/* This function overwrites a few normal Redis config default with Sentinel +/* This function overwrites a few normal server config default with Sentinel * specific defaults. */ void initSentinelConfig(void) { server.port = REDIS_SENTINEL_PORT; @@ -617,7 +617,7 @@ int sentinelAddrEqualsHostname(sentinelAddr *a, char *hostname) { sentinel.resolve_hostnames ? ANET_NONE : ANET_IP_ONLY) == ANET_ERR) { /* If failed resolve then compare based on hostnames. That is our best effort as - * long as the server is unavailable for some reason. It is fine since Redis + * long as the server is unavailable for some reason. It is fine since an * instance cannot have multiple hostnames for a given setup */ return !strcasecmp(sentinel.resolve_hostnames ? a->hostname : a->ip, hostname); } @@ -649,7 +649,7 @@ sds announceSentinelAddrAndPort(const sentinelAddr *a) { * * 'type' is the message type, also used as a pub/sub channel name. * - * 'ri', is the redis instance target of this event if applicable, and is + * 'ri', is the server instance target of this event if applicable, and is * used to obtain the path of the notification script to execute. * * The remaining arguments are printf-alike. 
@@ -1260,7 +1260,7 @@ void sentinelDisconnectCallback(const redisAsyncContext *c, int status) { /* ========================== sentinelRedisInstance ========================= */ -/* Create a redis instance, the following fields must be populated by the +/* Create an instance of the server, the following fields must be populated by the * caller if needed: * runid: set to NULL but will be populated once INFO output is received. * info_refresh: is set to 0 to mean that we never received INFO so far. @@ -1406,7 +1406,7 @@ void releaseSentinelRedisInstance(sentinelRedisInstance *ri) { zfree(ri); } -/* Lookup a slave in a master Redis instance, by ip and port. */ +/* Lookup a slave in a master instance, by ip and port. */ sentinelRedisInstance *sentinelRedisInstanceLookupSlave( sentinelRedisInstance *ri, char *slave_addr, int port) { @@ -1696,7 +1696,7 @@ void sentinelPropagateDownAfterPeriod(sentinelRedisInstance *master) { } } -/* This function is used in order to send commands to Redis instances: the +/* This function is used in order to send commands to server instances: the * commands we send from Sentinel may be renamed, a common case is a master * with CONFIG and SLAVEOF commands renamed for security concerns. In that * case we check the ri->renamed_command table (or if the instance is a slave, @@ -2274,10 +2274,10 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) { rewriteConfigMarkAsProcessed(state,"sentinel master-reboot-down-after-period"); } -/* This function uses the config rewriting Redis engine in order to persist +/* This function uses the config rewriting in order to persist * the state of the Sentinel in the current configuration file. * - * On failure the function logs a warning on the Redis log. */ + * On failure the function logs a warning on the server log. */ int sentinelFlushConfig(void) { int saved_hz = server.hz; int rewrite_status; @@ -2356,12 +2356,12 @@ void sentinelSendAuthIfNeeded(sentinelRedisInstance *ri, redisAsyncContext *c) { } } -/* Use CLIENT SETNAME to name the connection in the Redis instance as +/* Use CLIENT SETNAME to name the connection in the instance as * sentinel-- * The connection type is "cmd" or "pubsub" as specified by 'type'. * * This makes it possible to list all the sentinel instances connected - * to a Redis server with CLIENT LIST, grepping for a specific name format. */ + * to a server with CLIENT LIST, grepping for a specific name format. */ void sentinelSetClientName(sentinelRedisInstance *ri, redisAsyncContext *c, char *type) { char name[64]; @@ -2491,7 +2491,7 @@ void sentinelReconnectInstance(sentinelRedisInstance *ri) { link->disconnected = 0; } -/* ======================== Redis instances pinging ======================== */ +/* ======================== Server instances pinging ======================== */ /* Return true if master looks "sane", that is: * 1) It is actually a master in the current configuration. @@ -3002,7 +3002,7 @@ void sentinelReceiveHelloMessages(redisAsyncContext *c, void *reply, void *privd sentinelProcessHelloMessage(r->element[2]->str, r->element[2]->len); } -/* Send a "Hello" message via Pub/Sub to the specified 'ri' Redis +/* Send a "Hello" message via Pub/Sub to the specified 'ri' server * instance in order to broadcast the current configuration for this * master, and to advertise the existence of this Sentinel at the same time. 
* @@ -3071,7 +3071,7 @@ void sentinelForceHelloUpdateDictOfRedisInstances(dict *instances) { } /* This function forces the delivery of a "Hello" message (see - * sentinelSendHello() top comment for further information) to all the Redis + * sentinelSendHello() top comment for further information) to all the server * and Sentinel instances related to the specified 'master'. * * It is technically not needed since we send an update to every instance @@ -3395,7 +3395,7 @@ const char *sentinelFailoverStateStr(int state) { } } -/* Redis instance to Redis protocol representation. */ +/* Server instance to RESP representation. */ void addReplySentinelRedisInstance(client *c, sentinelRedisInstance *ri) { char *flags = sdsempty(); void *mbl; @@ -3800,7 +3800,7 @@ void addReplySentinelDebugInfo(client *c) { } /* Output a number of instances contained inside a dictionary as - * Redis protocol. */ + * RESP. */ void addReplyDictOfRedisInstances(client *c, dict *instances) { dictIterator *di; dictEntry *de; @@ -4868,7 +4868,7 @@ char *sentinelGetLeader(sentinelRedisInstance *master, uint64_t epoch) { /* Send SLAVEOF to the specified instance, always followed by a * CONFIG REWRITE command in order to store the new configuration on disk - * when possible (that is, if the Redis instance is recent enough to support + * when possible (that is, if the instance is recent enough to support * config rewriting, and if the server was started with a configuration file). * * If Host is NULL the function sends "SLAVEOF NO ONE". @@ -4920,9 +4920,9 @@ int sentinelSendSlaveOf(sentinelRedisInstance *ri, const sentinelAddr *addr) { if (retval == C_ERR) return retval; ri->link->pending_commands++; - /* CLIENT KILL TYPE is only supported starting from Redis 2.8.12, + /* CLIENT KILL TYPE is only supported starting from Redis OSS 2.8.12, * however sending it to an instance not understanding this command is not - * an issue because CLIENT is variadic command, so Redis will not + * an issue because CLIENT is variadic command, so the server will not * recognized as a syntax error, and the transaction will not fail (but * only the unsupported command will fail). */ for (int type = 0; type < 2; type++) { @@ -5048,7 +5048,7 @@ int compareSlavesForPromotion(const void *a, const void *b) { /* If the replication offset is the same select the slave with that has * the lexicographically smaller runid. Note that we try to handle runid - * == NULL as there are old Redis versions that don't publish runid in + * == NULL as there are old Redis OSS versions that don't publish runid in * INFO. A NULL runid is considered bigger than any other runid. */ sa_runid = (*sa)->runid; sb_runid = (*sb)->runid; @@ -5374,7 +5374,7 @@ void sentinelAbortFailover(sentinelRedisInstance *ri) { * in design. * -------------------------------------------------------------------------- */ -/* Perform scheduled operations for the specified Redis instance. */ +/* Perform scheduled operations for the specified instance. */ void sentinelHandleRedisInstance(sentinelRedisInstance *ri) { /* ========== MONITORING HALF ============ */ /* Every kind of instance */ @@ -5473,7 +5473,7 @@ void sentinelTimer(void) { sentinelCollectTerminatedScripts(); sentinelKillTimedoutScripts(); - /* We continuously change the frequency of the Redis "timer interrupt" + /* We continuously change the frequency of the server "timer interrupt" * in order to desynchronize every Sentinel from every other. 
* This non-determinism avoids that Sentinels started at the same time * exactly continue to stay synchronized asking to be voted at the diff --git a/src/server.c b/src/server.c index 143244f384..f1e14832a0 100644 --- a/src/server.c +++ b/src/server.c @@ -105,7 +105,7 @@ const char *replstateToString(int replstate); ((server.current_client && server.current_client->id == CLIENT_ID_AOF) ? 1 : 0) /* We use a private localtime implementation which is fork-safe. The logging - * function of Redis may be called from other threads. */ + * function of the server may be called from other threads. */ void nolocks_localtime(struct tm *tmp, time_t t, time_t tz, int dst); /* Low level logging. To use only for very big messages, otherwise @@ -201,7 +201,7 @@ void serverLogRawFromHandler(int level, const char *msg) { * with LL_RAW flag only the msg is printed (with no new line at the end) * * We actually use this only for signals that are not fatal from the point - * of view of Redis. Signals that are going to kill the server anyway and + * of view of the server. Signals that are going to kill the server anyway and * where we need printf-alike features are served by serverLog(). */ void serverLogFromHandler(int level, const char *fmt, ...) { va_list ap; @@ -267,7 +267,7 @@ void exitFromChild(int retcode) { /*====================== Hash table type implementation ==================== */ /* This is a hash table type that uses the SDS dynamic strings library as - * keys and redis objects as values (objects can hold SDS strings, + * keys and Objects as values (Objects can hold SDS strings, * lists, sets). */ void dictVanillaFree(dict *d, void *val) @@ -423,10 +423,10 @@ uint64_t dictEncObjHash(const void *key) { } /* Return 1 if currently we allow dict to expand. Dict may allocate huge - * memory to contain hash buckets when dict expands, that may lead redis - * rejects user's requests or evicts some keys, we can stop dict to expand + * memory to contain hash buckets when dict expands, that may lead the server to + * reject user's requests or evict some keys, we can stop dict to expand * provisionally if used memory will be over maxmemory after dict expands, - * but to guarantee the performance of redis, we still allow dict to expand + * but to guarantee the performance of the server, we still allow dict to expand * if dict load factor exceeds HASHTABLE_MAX_LOAD_FACTOR. */ int dictResizeAllowed(size_t moreMem, double usedRatio) { /* for debug purposes: dict is not allowed to be resized. */ @@ -439,7 +439,7 @@ int dictResizeAllowed(size_t moreMem, double usedRatio) { } } -/* Generic hash table type where keys are Redis Objects, Values +/* Generic hash table type where keys are Objects, Values * dummy pointers. */ dictType objectKeyPointerValueDictType = { dictEncObjHash, /* hash function */ @@ -487,7 +487,7 @@ dictType zsetDictType = { NULL, /* allow to expand */ }; -/* Db->dict, keys are sds strings, vals are Redis objects. */ +/* Db->dict, keys are sds strings, vals are Objects. */ dictType dbDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ @@ -542,7 +542,7 @@ dictType sdsReplyDictType = { NULL /* allow to expand */ }; -/* Keylist hash table type has unencoded redis objects as keys and +/* Keylist hash table type has unencoded Objects as keys and * lists as values. It's used for blocking operations (BLPOP) and to * map swapped keys to a list of clients waiting for this keys to be loaded. 
*/ dictType keylistDictType = { @@ -555,7 +555,7 @@ dictType keylistDictType = { NULL /* allow to expand */ }; -/* KeyDict hash table type has unencoded redis objects as keys and +/* KeyDict hash table type has unencoded Objects as keys and * dicts as values. It's used for PUBSUB command to track clients subscribing the channels. */ dictType objToDictDictType = { dictObjHash, /* hash function */ @@ -979,7 +979,7 @@ void getExpansiveClientsInfo(size_t *in_usage, size_t *out_usage) { * commands. * * It is very important for this function, and the functions it calls, to be - * very fast: sometimes Redis has tens of hundreds of connected clients, and the + * very fast: sometimes the server has tens of hundreds of connected clients, and the * default server.hz value is 10, so sometimes here we need to process thousands * of clients per second, turning this function into a source of latency. */ @@ -1050,7 +1050,7 @@ void clientsCron(void) { } /* This function handles 'background' operations we are required to do - * incrementally in Redis databases, such as active key expiring, resizing, + * incrementally in the databases, such as active key expiring, resizing, * rehashing. */ void databasesCron(void) { /* Expire keys by random sampling. Not required for slaves @@ -1326,7 +1326,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { * * Note that even if the counter wraps it's not a big problem, * everything will still work but some object will appear younger - * to Redis. However for this to happen a given object should never be + * to the server. However for this to happen a given object should never be * touched for all the time needed to the counter to wrap, which is * not likely. * @@ -1383,7 +1383,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* We need to do a few operations on clients asynchronously. */ clientsCron(); - /* Handle background operations on Redis databases. */ + /* Handle background operations on databases. */ databasesCron(); /* Start a scheduled AOF rewrite if this was requested by the user while @@ -1470,7 +1470,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* Replication cron function -- used to reconnect to master, * detect transfer failures, start background RDB transfers and so forth. * - * If Redis is trying to failover then run the replication cron faster so + * If the server is trying to failover then run the replication cron faster so * progress on the handshake happens more quickly. */ if (server.failover_state != NO_FAILOVER) { run_with_period(100) replicationCron(); @@ -1478,7 +1478,7 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { run_with_period(1000) replicationCron(); } - /* Run the Redis Cluster cron. */ + /* Run the Cluster cron. */ run_with_period(100) { if (server.cluster_enabled) clusterCron(); } @@ -1617,7 +1617,7 @@ static void sendGetackToReplicas(void) { extern int ProcessingEventsWhileBlocked; -/* This function gets called every time Redis is entering the +/* This function gets called every time the server is entering the * main loop of the event driven library, that is, before to sleep * for ready file descriptors. * @@ -1664,8 +1664,8 @@ void beforeSleep(struct aeEventLoop *eventLoop) { /* If any connection type(typical TLS) still has pending unread data don't sleep at all. */ int dont_sleep = connTypeHasPendingData(); - /* Call the Redis Cluster before sleep function. 
Note that this function - * may change the state of Redis Cluster (from ok to fail or vice versa), + /* Call the Cluster before sleep function. Note that this function + * may change the state of Cluster (from ok to fail or vice versa), * so it's a good idea to call it before serving the unblocked clients * later in this function, must be done before blockedBeforeSleep. */ if (server.cluster_enabled) clusterBeforeSleep(); @@ -1791,7 +1791,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { aeSetDontWait(server.el, dont_sleep); /* Before we are going to sleep, let the threads access the dataset by - * releasing the GIL. Redis main thread will not touch anything at this + * releasing the GIL. The server main thread will not touch anything at this * time. */ if (moduleCount()) moduleReleaseGIL(); /********************* WARNING ******************** @@ -1800,7 +1800,7 @@ void beforeSleep(struct aeEventLoop *eventLoop) { } /* This function is called immediately after the event loop multiplexing - * API returned, and the control is going to soon return to Redis by invoking + * API returned, and the control is going to soon return to the server by invoking * the different events callbacks. */ void afterSleep(struct aeEventLoop *eventLoop) { UNUSED(eventLoop); @@ -2200,7 +2200,7 @@ int restartServer(int flags, mstime_t delay) { } /* Close all file descriptors, with the exception of stdin, stdout, stderr - * which are useful if we restart a Redis server which is not daemonized. */ + * which are useful if we restart a server which is not daemonized. */ for (j = 3; j < (int)server.maxclients + 1024; j++) { /* Test the descriptor validity before closing it, otherwise * Valgrind issues a warning on close(). */ @@ -2233,7 +2233,7 @@ int setOOMScoreAdj(int process_class) { serverAssert(process_class >= 0 && process_class < CONFIG_OOM_COUNT); #ifdef HAVE_PROC_OOM_SCORE_ADJ - /* The following statics are used to indicate Redis has changed the process's oom score. + /* The following statics are used to indicate the server has changed the process's oom score. * And to save the original score so we can restore it later if needed. * We need this so when we disabled oom-score-adj (also during configuration rollback * when another configuration parameter was invalid and causes a rollback after @@ -2249,7 +2249,7 @@ int setOOMScoreAdj(int process_class) { if (server.oom_score_adj != OOM_SCORE_ADJ_NO) { if (!oom_score_adjusted_by_redis) { oom_score_adjusted_by_redis = 1; - /* Backup base value before enabling Redis control over oom score */ + /* Backup base value before enabling the server control over oom score */ fd = open("/proc/self/oom_score_adj", O_RDONLY); if (fd < 0 || read(fd, buf, sizeof(buf)) < 0) { serverLog(LL_WARNING, "Unable to read oom_score_adj: %s", strerror(errno)); @@ -2451,7 +2451,7 @@ int createSocketAcceptHandler(connListener *sfd, aeFileProc *accept_handler) { } /* Initialize a set of file descriptors to listen to the specified 'port' - * binding the addresses specified in the Redis server configuration. + * binding the addresses specified in the server configuration. * * The listening file descriptors are stored in the integer array 'fds' * and their number is set in '*count'. Actually @sfd should be 'listener', @@ -2661,7 +2661,7 @@ void initServer(void) { } server.db = zmalloc(sizeof(server)*server.dbnum); - /* Create the Redis databases, and initialize other internal state. */ + /* Create the databases, and initialize other internal state. 
*/ int slot_count_bits = 0; int flags = KVSTORE_ALLOCATE_DICTS_ON_DEMAND; if (server.cluster_enabled) { @@ -2773,7 +2773,7 @@ void initServer(void) { /* 32 bit instances are limited to 4GB of address space, so if there is * no explicit limit in the user provided configuration we set a limit * at 3 GB using maxmemory with 'noeviction' policy'. This avoids - * useless crashes of the Redis instance for out of memory. */ + * useless crashes of the instance for out of memory. */ if (server.arch_bits == 32 && server.maxmemory == 0) { serverLog(LL_WARNING,"Warning: 32 bit instance detected but no memory limit set. Setting 3 GB maxmemory limit with 'noeviction' policy now."); server.maxmemory = 3072LL*(1024*1024); /* 3 GB */ @@ -2898,7 +2898,7 @@ void InitServerLast(void) { * 3. The order of the range specs must be ascending (i.e. * lastkey of spec[i] == firstkey-1 of spec[i+1]). * - * This function will succeed on all native Redis commands and may + * This function will succeed on all native commands and may * fail on module commands, even if it only has "range" specs that * could actually be "glued", in the following cases: * 1. The order of "range" specs is not ascending (e.g. the spec for @@ -2915,7 +2915,7 @@ void populateCommandLegacyRangeSpec(struct serverCommand *c) { memset(&c->legacy_range_key_spec, 0, sizeof(c->legacy_range_key_spec)); /* Set the movablekeys flag if we have a GETKEYS flag for modules. - * Note that for native redis commands, we always have keyspecs, + * Note that for native commands, we always have keyspecs, * with enough information to rely on for movablekeys. */ if (c->flags & CMD_MODULE_GETKEYS) c->flags |= CMD_MOVABLE_KEYS; @@ -3067,7 +3067,7 @@ int populateCommandStructure(struct serverCommand *c) { extern struct serverCommand serverCommandTable[]; -/* Populates the Redis Command Table dict from the static table in commands.c +/* Populates the Command Table dict from the static table in commands.c * which is auto generated from the json files in the commands folder. */ void populateCommandTable(void) { int j; @@ -3120,7 +3120,7 @@ void resetErrorTableStats(void) { server.errors_enabled = 1; } -/* ========================== Redis OP Array API ============================ */ +/* ========================== OP Array API ============================ */ int serverOpArrayAppend(serverOpArray *oa, int dbid, robj **argv, int argc, int target) { serverOp *op; @@ -3308,8 +3308,8 @@ static void propagateNow(int dbid, robj **argv, int argc, int target) { * after the current command is propagated to AOF / Replication. * * dbid is the database ID the command should be propagated into. - * Arguments of the command to propagate are passed as an array of redis - * objects pointers of len 'argc', using the 'argv' vector. + * Arguments of the command to propagate are passed as an array of + * Object pointers of len 'argc', using the 'argv' vector. * * The function does not take a reference to the passed 'argv' vector, * so it is up to the caller to release the passed argv (but it is usually @@ -3331,7 +3331,7 @@ void alsoPropagate(int dbid, robj **argv, int argc, int target) { } /* It is possible to call the function forceCommandPropagation() inside a - * Redis command implementation in order to to force the propagation of a + * command implementation in order to force the propagation of a * specific command execution into AOF / Replication.
*/ void forceCommandPropagation(client *c, int flags) { serverAssert(c->cmd->flags & (CMD_WRITE | CMD_MAY_REPLICATE)); @@ -3429,7 +3429,7 @@ static void propagatePendingCommands(void) { /* Performs operations that should be performed after an execution unit ends. * Execution unit is a code that should be done atomically. - * Execution units can be nested and are not necessarily starts with Redis command. + * Execution units can be nested and do not necessarily start with a server command. * * For example the following is a logical unit: * active expire -> @@ -3483,7 +3483,7 @@ int incrCommandStatsOnError(struct serverCommand *cmd, int flags) { return res; } -/* Call() is the core of Redis execution of a command. +/* Call() is the core of the server's execution of a command. * * The following flags can be passed: * CMD_CALL_NONE No flags. @@ -3539,7 +3539,7 @@ void call(client *c, int flags) { * demand, and initialize the array for additional commands propagation. */ c->flags &= ~(CLIENT_FORCE_AOF|CLIENT_FORCE_REPL|CLIENT_PREVENT_PROP); - /* Redis core is in charge of propagation when the first entry point + /* The server core is in charge of propagation when the first entry point * of call() is processCommand(). * The only other option to get to call() without having processCommand * as an entry point is if a module triggers RM_Call outside of call() @@ -4437,7 +4437,7 @@ int finishShutdown(void) { * doing it's cleanup, but in this case this code will not be reached, * so we need to call rdbRemoveTempFile which will close fd(in order * to unlink file actually) in background thread. - * The temp rdb file fd may won't be closed when redis exits quickly, + * The temp rdb file fd may not be closed when the server exits quickly, * but OS will close this fd when process exits. */ rdbRemoveTempFile(server.child_pid, 0); } @@ -4489,7 +4489,7 @@ int finishShutdown(void) { if (rdbSave(SLAVE_REQ_NONE,server.rdb_filename,rsiptr,RDBFLAGS_KEEP_CACHE) != C_OK) { /* Ooops.. error saving! The best we can do is to continue * operating. Note that if there was a background saving process, - * in the next cron() Redis will be notified that the background + * in the next cron() the server will be notified that the background * saving aborted, handling special stuff like slaves pending for * synchronization... */ if (force) { @@ -4542,8 +4542,8 @@ int finishShutdown(void) { /*================================== Commands =============================== */ -/* Sometimes Redis cannot accept write commands because there is a persistence - * error with the RDB or AOF file, and Redis is configured in order to stop +/* Sometimes the server cannot accept write commands because there is a persistence + * error with the RDB or AOF file, and the server is configured in order to stop * accepting writes in such situation. This function returns if such a * condition is active, and the type of the condition. * @@ -4957,7 +4957,7 @@ void addReplyCommandSubCommands(client *c, struct serverCommand *cmd, void (*rep dictReleaseIterator(di); } -/* Output the representation of a Redis command. Used by the COMMAND command and COMMAND INFO. */ +/* Output the representation of a server command. Used by the COMMAND command and COMMAND INFO. */ void addReplyCommandInfo(client *c, struct serverCommand *cmd) { if (!cmd) { addReplyNull(c); @@ -4985,7 +4985,7 @@ } } -/* Output the representation of a Redis command. Used by the COMMAND DOCS.
*/ +/* Output the representation of a server command. Used by the COMMAND DOCS. */ void addReplyCommandDocs(client *c, struct serverCommand *cmd) { /* Count our reply len so we don't have to use deferred reply. */ long maplen = 1; @@ -6265,7 +6265,7 @@ void daemonize(void) { if (fork() != 0) exit(0); /* parent exits */ setsid(); /* create a new session */ - /* Every output goes to /dev/null. If Redis is daemonized but + /* Every output goes to /dev/null. If the server is daemonized but * the 'logfile' is set to 'stdout' in the configuration file * it will not log at all. */ if ((fd = open("/dev/null", O_RDWR, 0)) != -1) { @@ -7006,8 +7006,8 @@ int main(int argc, char **argv) { initSentinel(); } - /* Check if we need to start in redis-check-rdb/aof mode. We just execute - * the program main. However the program is part of the Redis executable + /* Check if we need to start in valkey-check-rdb/aof mode. We just execute + * the program main. However the program is part of the server executable * so that we can easily execute an RDB check on loading errors. */ if (strstr(exec_name,"valkey-check-rdb") != NULL) redis_check_rdb_main(argc,argv,NULL); diff --git a/src/server.h b/src/server.h index 3f4561df0a..a8e7464acd 100644 --- a/src/server.h +++ b/src/server.h @@ -82,9 +82,9 @@ typedef long long ustime_t; /* microsecond time type. */ #define VALKEYMODULE_CORE 1 typedef struct serverObject robj; -#include "valkeymodule.h" /* Redis modules API defines. */ +#include "valkeymodule.h" /* Modules API defines. */ -/* Following includes allow test functions to be called from Redis main() */ +/* Following includes allow test functions to be called from main() */ #include "zipmap.h" #include "ziplist.h" /* Compact list data structure */ #include "sha1.h" @@ -227,7 +227,7 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; #define CMD_NO_MANDATORY_KEYS (1ULL<<19) #define CMD_PROTECTED (1ULL<<20) #define CMD_MODULE_GETKEYS (1ULL<<21) /* Use the modules getkeys interface. */ -#define CMD_MODULE_NO_CLUSTER (1ULL<<22) /* Deny on Redis Cluster. */ +#define CMD_MODULE_NO_CLUSTER (1ULL<<22) /* Deny on Cluster. */ #define CMD_NO_ASYNC_LOADING (1ULL<<23) #define CMD_NO_MULTI (1ULL<<24) #define CMD_MOVABLE_KEYS (1ULL<<25) /* The legacy range spec doesn't cover all keys. @@ -583,7 +583,7 @@ typedef enum { #define OOM_SCORE_RELATIVE 1 #define OOM_SCORE_ADJ_ABSOLUTE 2 -/* Redis maxmemory strategies. Instead of using just incremental number +/* Server maxmemory strategies. Instead of using just incremental number * for this defines, we use a set of flags so that testing for certain * properties common to multiple policies is faster. */ #define MAXMEMORY_FLAG_LRU (1<<0) @@ -722,9 +722,9 @@ typedef enum { * Data types *----------------------------------------------------------------------------*/ -/* A redis object, that is a type able to hold a string / list / set */ +/* An Object, that is a type able to hold a string / list / set */ -/* The actual Redis Object */ +/* The actual Object */ #define OBJ_STRING 0 /* String object. */ #define OBJ_LIST 1 /* List object. */ #define OBJ_SET 2 /* Set object. */ @@ -732,7 +732,7 @@ typedef enum { #define OBJ_HASH 4 /* Hash object. */ /* The "module" object type is a special one that signals that the object - * is one directly managed by a Redis module. In this case the value points + * is one directly managed by a module. 
In this case the value points * to a moduleValue struct, which contains the object value (which is only * handled by the module itself) and the ValkeyModuleType struct which lists * function pointers in order to serialize, deserialize, AOF-rewrite and @@ -815,7 +815,7 @@ typedef struct ValkeyModuleType { char name[10]; /* 9 bytes name + null term. Charset: A-Z a-z 0-9 _- */ } moduleType; -/* In Redis objects 'robj' structures of type OBJ_MODULE, the value pointer +/* In Object 'robj' structures of type OBJ_MODULE, the value pointer * is set to the following structure, referencing the moduleType structure * in order to work with the value, and at the same time providing a raw * pointer to the value, as created by the module commands operating with @@ -860,7 +860,7 @@ struct ValkeyModule { }; typedef struct ValkeyModule ValkeyModule; -/* This is a wrapper for the 'rio' streams used inside rdb.c in Redis, so that +/* This is a wrapper for the 'rio' streams used inside rdb.c in the server, so that * the user does not have to take the total count of the written bytes nor * to care about error conditions. */ struct ValkeyModuleIO { @@ -888,7 +888,7 @@ struct ValkeyModuleIO { iovar.pre_flush_buffer = NULL; \ } while(0) -/* This is a structure used to export DEBUG DIGEST capabilities to Redis +/* This is a structure used to export DEBUG DIGEST capabilities to * modules. We want to capture both the ordered and unordered elements of * a data structure, so that a digest can be created in a way that correctly * reflects the values. See the DEBUG DIGEST command implementation for more @@ -947,7 +947,7 @@ struct serverObject { * and Module types have their registered name returned. */ char *getObjectTypeName(robj*); -/* Macro used to initialize a Redis object allocated on the stack. +/* Macro used to initialize an Object allocated on the stack. * Note that this macro is taken near the structure definition to make sure * we'll update it when the structure is changed, to avoid bugs like * bug #85 introduced exactly in this way. */ @@ -1001,7 +1001,7 @@ typedef struct replDataBufBlock { char buf[]; } replDataBufBlock; -/* Redis database representation. There are multiple databases identified +/* Database representation. There are multiple databases identified * by integers from 0 (the default database) up to the max configured * database. The database number is the 'id' field in the structure. */ typedef struct serverDb { @@ -1088,7 +1088,7 @@ typedef struct blockingState { * After the execution of every command or script, we iterate over this list to check * if as a result we should serve data to clients blocked, unblocking them. * Note that server.ready_keys will not have duplicates as there dictionary - * also called ready_keys in every structure representing a Redis database, + * also called ready_keys in every structure representing a database, * where we make sure to remember if a given key was already added in the * server.ready_keys list. */ typedef struct readyList { @@ -1096,7 +1096,7 @@ typedef struct readyList { robj *key; } readyList; -/* This structure represents a Redis user. This is useful for ACLs, the +/* This structure represents a user. This is useful for ACLs, the * user is associated to the connection after the connection is authenticated. * If there is no associated user, the connection uses the default user. */ #define USER_COMMAND_BITS_COUNT 1024 /* The total number of command bits @@ -1281,10 +1281,10 @@ typedef struct client { * changes. 
*/ void *auth_callback_privdata; /* Private data that is passed when the auth * changed callback is executed. Opaque for - * Redis Core. */ + * the Server Core. */ void *auth_module; /* The module that owns the callback, which is used * to disconnect the client if the module is - * unloaded for cleanup. Opaque for Redis Core.*/ + * unloaded for cleanup. Opaque for the Server Core.*/ /* If this client is in tracking mode and this field is non zero, * invalidation messages for keys fetched by this client will be sent to @@ -1409,7 +1409,7 @@ typedef struct clientBufferLimitsConfig { extern clientBufferLimitsConfig clientBufferLimitsDefaults[CLIENT_TYPE_OBUF_COUNT]; -/* The serverOp structure defines a Redis Operation, that is an instance of +/* The serverOp structure defines an Operation, that is an instance of * a command with an argument vector, database ID, propagation target * (PROPAGATE_*), and command pointer. * @@ -1420,7 +1420,7 @@ typedef struct serverOp { int argc, dbid, target; } serverOp; -/* Defines an array of Redis operations. There is an API to add to this +/* Defines an array of Operations. There is an API to add to this * structure in an easy way. * * int serverOpArrayAppend(serverOpArray *oa, int dbid, robj **argv, int argc, int target); @@ -1564,7 +1564,7 @@ typedef struct { *----------------------------------------------------------------------------*/ /* AIX defines hz to __hz, we don't use this define and in order to allow - * Redis build on AIX we need to undef it. */ + * the server build on AIX we need to undef it. */ #ifdef _AIX #undef hz #endif @@ -2073,7 +2073,7 @@ struct valkeyServer { int cluster_announce_port; /* base port to announce on cluster bus. */ int cluster_announce_tls_port; /* TLS port to announce on cluster bus. */ int cluster_announce_bus_port; /* bus port to announce on cluster bus. */ - int cluster_module_flags; /* Set of flags that Redis modules are able + int cluster_module_flags; /* Set of flags that modules are able to set in order to suppress certain native Redis Cluster features. Check the VALKEYMODULE_CLUSTER_FLAG_*. */ @@ -2114,7 +2114,7 @@ struct valkeyServer { int tls_auth_clients; serverTLSContextConfig tls_ctx_config; /* cpu affinity */ - char *server_cpulist; /* cpu affinity list of redis server main/io thread. */ + char *server_cpulist; /* cpu affinity list of server main/io thread. */ char *bio_cpulist; /* cpu affinity list of bio thread. */ char *aof_rewrite_cpulist; /* cpu affinity list of aof rewrite process. */ char *bgsave_cpulist; /* cpu affinity list of bgsave process. */ @@ -2302,7 +2302,7 @@ typedef enum { typedef void serverCommandProc(client *c); typedef int serverGetKeysProc(struct serverCommand *cmd, robj **argv, int argc, getKeysResult *result); -/* Redis command structure. +/* Command structure. * * Note that the command table is in commands.c and it is auto-generated. * @@ -2417,7 +2417,7 @@ struct serverCommand { keySpec *key_specs; int key_specs_num; /* Use a function to determine keys arguments in a command line. - * Used for Redis Cluster redirect (may be NULL) */ + * Used for Cluster redirect (may be NULL) */ serverGetKeysProc *getkeys_proc; int num_args; /* Length of args array. 
*/ /* Array of subcommands (may be NULL) */ @@ -2816,7 +2816,7 @@ void discardTransaction(client *c); void flagTransaction(client *c); void execCommandAbort(client *c, sds error); -/* Redis object implementation */ +/* Object implementation */ void decrRefCount(robj *o); void decrRefCountVoid(void *o); void incrRefCount(robj *o); @@ -3444,7 +3444,7 @@ void sentinelInfoCommand(client *c); void sentinelPublishCommand(client *c); void sentinelRoleCommand(client *c); -/* redis-check-rdb & aof */ +/* valkey-check-rdb & aof */ int redis_check_rdb(char *rdbfilename, FILE *fp); int redis_check_rdb_main(int argc, char **argv, FILE *fp); int redis_check_aof_main(int argc, char **argv); diff --git a/src/serverassert.h b/src/serverassert.h index da1b7c7fa2..945549f895 100644 --- a/src/serverassert.h +++ b/src/serverassert.h @@ -1,8 +1,8 @@ /* serverassert.h -- Drop in replacements assert.h that prints the stack trace - * in the Redis logs. + * in the server logs. * * This file should be included instead of "assert.h" inside libraries used by - * Redis that are using assertions, so instead of Redis disappearing with + * the server that are using assertions, so instead of the server disappearing with * SIGABORT, we get the details and stack trace inside the log file. * * ---------------------------------------------------------------------------- diff --git a/src/slowlog.c b/src/slowlog.c index a68064af2d..b6d743aa06 100644 --- a/src/slowlog.c +++ b/src/slowlog.c @@ -5,7 +5,7 @@ * using the 'slowlog-log-slower-than' config directive, that is also * readable and writable using the CONFIG SET/GET command. * - * The slow queries log is actually not "logged" in the Redis log file + * The slow queries log is actually not "logged" in the server log file * but is accessible thanks to the SLOWLOG command. * * ---------------------------------------------------------------------------- @@ -78,7 +78,7 @@ slowlogEntry *slowlogCreateEntry(client *c, robj **argv, int argc, long long dur /* Here we need to duplicate the string objects composing the * argument vector of the command, because those may otherwise * end shared with string objects stored into keys. Having - * shared objects between any part of Redis, and the data + * shared objects between any part of the server, and the data * structure holding the data, is a problem: FLUSHALL ASYNC * may release the shared string object and create a race. */ se->argv[j] = dupStringObject(argv[j]); @@ -138,7 +138,7 @@ void slowlogReset(void) { } /* The SLOWLOG command. Implements all the subcommands needed to handle the - * Redis slow log. */ + * slow log. */ void slowlogCommand(client *c) { if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) { const char *help[] = { diff --git a/src/socket.c b/src/socket.c index 775d56d375..70b77d4b42 100644 --- a/src/socket.c +++ b/src/socket.c @@ -31,7 +31,7 @@ #include "connhelpers.h" /* The connections module provides a lean abstraction of network connections - * to avoid direct socket and async event management across the Redis code base. + * to avoid direct socket and async event management across the server code base. * * It does NOT provide advanced connection features commonly found in similar * libraries such as complete in/out buffer management, throttling, etc. These diff --git a/src/sort.c b/src/sort.c index 78d15a7199..dea9bb73e3 100644 --- a/src/sort.c +++ b/src/sort.c @@ -185,7 +185,7 @@ int sortCompare(const void *s1, const void *s2) { return server.sort_desc ? 
-cmp : cmp; } -/* The SORT command is the most complex command in Redis. Warning: this code +/* The SORT command is the most complex command in the server. Warning: this code * is optimized for speed and a bit less for readability */ void sortCommandGeneric(client *c, int readonly) { list *operations; diff --git a/src/syncio.c b/src/syncio.c index b2843d5fbc..b6aa6a8e76 100644 --- a/src/syncio.c +++ b/src/syncio.c @@ -32,7 +32,7 @@ /* ----------------- Blocking sockets I/O with timeouts --------------------- */ -/* Redis performs most of the I/O in a nonblocking way, with the exception +/* The server performs most of the I/O in a nonblocking way, with the exception * of the SYNC command where the slave does it in a blocking way, and * the MIGRATE command that must be blocking in order to be atomic from the * point of view of the two instances (one migrating the key and one receiving diff --git a/src/syscheck.c b/src/syscheck.c index 0ea3a2510b..008e28bbff 100644 --- a/src/syscheck.c +++ b/src/syscheck.c @@ -63,7 +63,7 @@ static sds read_sysfs_line(char *path) { } /* Verify our clocksource implementation doesn't go through a system call (uses vdso). - * Going through a system call to check the time degrades Redis performance. */ + * Going through a system call to check the time degrades server performance. */ static int checkClocksource(sds *error_msg) { unsigned long test_time_us, system_hz; struct timespec ts; @@ -117,7 +117,7 @@ static int checkClocksource(sds *error_msg) { } /* Verify we're not using the `xen` clocksource. The xen hypervisor's default clocksource is slow and affects - * Redis's performance. This has been measured on ec2 xen based instances. ec2 recommends using the non-default + * the server's performance. This has been measured on ec2 xen based instances. ec2 recommends using the non-default * tsc clock source for these instances. */ int checkXenClocksource(sds *error_msg) { sds curr = read_sysfs_line("/sys/devices/system/clocksource/clocksource0/current_clocksource"); diff --git a/src/t_hash.c b/src/t_hash.c index ff8746384c..5c1d218c55 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -137,7 +137,7 @@ int hashTypeGetValue(robj *o, sds field, unsigned char **vstr, unsigned int *vle return C_ERR; } -/* Like hashTypeGetValue() but returns a Redis object, which is useful for +/* Like hashTypeGetValue() but returns an Object, which is useful for * interaction with the hash type outside t_hash.c. * The function returns NULL if the field is not found in the hash. Otherwise * a newly allocated string object with the value is returned. */ diff --git a/src/t_list.c b/src/t_list.c index d931937495..a3668ce1e3 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -1143,7 +1143,7 @@ void lmoveGenericCommand(client *c, int wherefrom, int whereto) { if (listTypeLength(sobj) == 0) { /* This may only happen after loading very old RDB files. Recent - * versions of Redis delete keys of empty lists. */ + * versions of the server delete keys of empty lists. 
*/ addReplyNull(c); } else { robj *dobj = lookupKeyWrite(c->db,c->argv[2]); diff --git a/src/t_set.c b/src/t_set.c index 24e7b0e7d2..cb53045eb2 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -516,7 +516,7 @@ int setTypeConvertAndExpand(robj *setobj, int enc, unsigned long cap, int panic) return C_ERR; } - /* To add the elements we extract integers and create redis objects */ + /* To add the elements we extract integers and create Objects */ si = setTypeInitIterator(setobj); while ((element = setTypeNextObject(si)) != NULL) { serverAssert(dictAdd(d,element,NULL) == DICT_OK); diff --git a/src/t_stream.c b/src/t_stream.c index ee37f97808..f70019a746 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1870,7 +1870,7 @@ robj *streamTypeLookupWriteOrCreate(client *c, robj *key, int no_create) { return o; } -/* Parse a stream ID in the format given by clients to Redis, that is +/* Parse a stream ID in the format given by clients to the server, that is * -, and converts it into a streamID structure. If * the specified ID is invalid C_ERR is returned and an error is reported * to the client, otherwise C_OK is returned. The ID may be in incomplete diff --git a/src/t_string.c b/src/t_string.c index 2bce3acc81..1a36871233 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -57,7 +57,7 @@ static int checkStringLength(client *c, long long size, long long append) { * * 'flags' changes the behavior of the command (NX, XX or GET, see below). * - * 'expire' represents an expire to set in form of a Redis object as passed + * 'expire' represents an expire to set in the form of an Object as passed * by the user. It is interpreted according to the specified 'unit'. * * 'ok_reply' and 'abort_reply' is what the function will reply to the client diff --git a/src/t_zset.c b/src/t_zset.c index 6d4edd2123..d9a401a4d9 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -36,9 +36,9 @@ * in order to get O(log(N)) INSERT and REMOVE operations into a sorted * data structure. * - * The elements are added to a hash table mapping Redis objects to scores. + * The elements are added to a hash table mapping Objects to scores. * At the same time the elements are added to a skip list mapping scores - * to Redis objects (so objects are sorted by scores in this "view"). + * to Objects (so objects are sorted by scores in this "view"). * * Note that the SDS string representing the element is the same in both * the hash table and skiplist in order to save memory. What we do in order @@ -598,7 +598,7 @@ static int zslParseRange(robj *min, robj *max, zrangespec *spec) { * - means the min string possible * + means the max string possible * - * If the string is valid the *dest pointer is set to the redis object + * If the string is valid the *dest pointer is set to the Object * that will be used for the comparison, and ex will be set to 0 or 1 * respectively if the item is exclusive or inclusive. C_OK will be * returned. diff --git a/src/tls.c b/src/tls.c index ee3cd0fa3b..54eaf255dc 100644 --- a/src/tls.c +++ b/src/tls.c @@ -27,7 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#define VALKEYMODULE_CORE_MODULE /* A module that's part of the redis core, uses server.h too. */ +#define VALKEYMODULE_CORE_MODULE /* A module that's part of the server core, uses server.h too. */ #include "server.h" #include "connhelpers.h" @@ -1175,7 +1175,7 @@ int ValkeyModule_OnLoad(void *ctx, ValkeyModuleString **argv, int argc) { UNUSED(argv); UNUSED(argc); - /* Connection modules must be part of the same build as redis. 
*/ + /* Connection modules must be part of the same build as the server. */ if (strcmp(REDIS_BUILD_ID_RAW, serverBuildIdRaw())) { serverLog(LL_NOTICE, "Connection type %s was not built together with the redis-server used.", CONN_TYPE_TLS); return VALKEYMODULE_ERR; diff --git a/src/tracking.c b/src/tracking.c index 429770065b..87183af566 100644 --- a/src/tracking.c +++ b/src/tracking.c @@ -216,7 +216,7 @@ void enableTracking(client *c, uint64_t redirect_to, uint64_t options, robj **pr /* This function is called after the execution of a readonly command in the * case the client 'c' has keys tracking enabled and the tracking is not * in BCAST mode. It will populate the tracking invalidation table according - * to the keys the user fetched, so that Redis will know what are the clients + * to the keys the user fetched, so that the server will know what are the clients * that should receive an invalidation message with certain groups of keys * are modified. */ void trackingRememberKeys(client *tracking, client *executing) { @@ -268,7 +268,7 @@ void trackingRememberKeys(client *tracking, client *executing) { * * In case the 'proto' argument is non zero, the function will assume that * 'keyname' points to a buffer of 'keylen' bytes already expressed in the - * form of Redis RESP protocol. This is used for: + * form of RESP protocol. This is used for: * - In BCAST mode, to send an array of invalidated keys to all * applicable clients * - Following a flush command, to send a single RESP NULL to indicate @@ -331,7 +331,7 @@ void sendTrackingMessage(client *c, char *keyname, size_t keylen, int proto) { if (!(old_flags & CLIENT_PUSHING)) c->flags &= ~CLIENT_PUSHING; } -/* This function is called when a key is modified in Redis and in the case +/* This function is called when a key is modified in the server and in the case * we have at least one client with the BCAST mode enabled. * Its goal is to set the key in the right broadcast state if the key * matches one or more prefixes in the prefix table. Later when we @@ -355,7 +355,7 @@ void trackingRememberKeyToBroadcast(client *c, char *keyname, size_t keylen) { raxStop(&ri); } -/* This function is called from signalModifiedKey() or other places in Redis +/* This function is called from signalModifiedKey() or other places in the server * when a key changes value. In the context of keys tracking, our task here is * to send a notification to every client that may have keys about such caching * slot. @@ -366,7 +366,7 @@ void trackingRememberKeyToBroadcast(client *c, char *keyname, size_t keylen) { * * The last argument 'bcast' tells the function if it should also schedule * the key for broadcasting to clients in BCAST mode. This is the case when - * the function is called from the Redis core once a key is modified, however + * the function is called from the server core once a key is modified, however * we also call the function in order to evict keys in the key table in case * of memory pressure: in that case the key didn't really change, so we want * just to notify the clients that are in the table for this key, that would @@ -458,7 +458,7 @@ void trackingHandlePendingKeyInvalidations(void) { listEmpty(server.tracking_pending_keys); } -/* This function is called when one or all the Redis databases are +/* This function is called when one or all of the databases are * flushed. 
Caching keys are not specific for each DB but are global: * currently what we do is send a special notification to clients with * tracking enabled, sending a RESP NULL, which means, "all the keys", @@ -504,12 +504,12 @@ void trackingInvalidateKeysOnFlush(int async) { } } -/* Tracking forces Redis to remember information about which client may have +/* Tracking forces the server to remember information about which client may have * certain keys. In workloads where there are a lot of reads, but keys are * hardly modified, the amount of information we have to remember server side * could be a lot, with the number of keys being totally not bound. * - * So Redis allows the user to configure a maximum number of keys for the + * So the server allows the user to configure a maximum number of keys for the * invalidation table. This function makes sure that we don't go over the * specified fill rate: if we are over, we can just evict information about * a random key, and send invalidation messages to clients like if the key was @@ -553,7 +553,7 @@ void trackingLimitUsedSlots(void) { timeout_counter++; } -/* Generate Redis protocol for an array containing all the key names +/* Generate RESP for an array containing all the key names * in the 'keys' radix tree. If the client is not NULL, the list will not * include keys that were modified the last time by this client, in order * to implement the NOLOOP option. diff --git a/src/unix.c b/src/unix.c index eb5850765a..5428c5110c 100644 --- a/src/unix.c +++ b/src/unix.c @@ -56,7 +56,7 @@ static int connUnixListen(connListener *listener) { if (listener->bindaddr_count == 0) return C_OK; - /* currently listener->bindaddr_count is always 1, we still use a loop here in case Redis supports multi Unix socket in the future */ + /* currently listener->bindaddr_count is always 1, but we still use a loop here in case the server supports multiple Unix sockets in the future */ for (int j = 0; j < listener->bindaddr_count; j++) { char *addr = listener->bindaddr[j]; diff --git a/src/util.c b/src/util.c index 3f9533369b..b5ef5ea2b4 100644 --- a/src/util.c +++ b/src/util.c @@ -696,7 +696,7 @@ int d2string(char *buf, size_t len, double value) { * We convert the double to long and multiply it by 10 ^ to shift * the decimal places. * Note that multiply it of input value by 10 ^ can overflow but on the scenario - * that we currently use within redis this that is not possible. + * that we currently use within the server this is not possible. * After we get the long representation we use the logic from ull2string function on this file * which is based on the following article: * https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920 @@ -959,8 +959,8 @@ void getRandomBytes(unsigned char *p, size_t len) { } } -/* Generate the Redis "Run ID", a SHA1-sized random number that identifies a - * given execution of Redis, so that if you are talking with an instance +/* Generate the server "Run ID", a SHA1-sized random number that identifies a + * given execution of the server, so that if you are talking with an instance * having run_id == A, and you reconnect and it has run_id == B, you can be * sure that it is either a different instance or it was restarted. */ void getRandomHexChars(char *p, size_t len) { @@ -1042,7 +1042,7 @@ long getTimeZone(void) { /* Return true if the specified path is just a file basename without any * relative or absolute path.
This function just checks that no / or \ * character exists inside the specified path, that's enough in the - * environments where Redis runs. */ + * environments where the server runs. */ int pathIsBaseName(char *path) { return strchr(path,'/') == NULL && strchr(path,'\\') == NULL; } diff --git a/src/valkey-benchmark.c b/src/valkey-benchmark.c index 1a7b9f6c56..d3a3944f75 100644 --- a/src/valkey-benchmark.c +++ b/src/valkey-benchmark.c @@ -1,4 +1,4 @@ -/* Redis benchmark utility. +/* Server benchmark utility. * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. @@ -1377,7 +1377,7 @@ static void updateClusterSlotsConfiguration(void) { pthread_mutex_unlock(&config.is_updating_slots_mutex); } -/* Generate random data for redis benchmark. See #7196. */ +/* Generate random data for the benchmark. See #7196. */ static void genBenchmarkRandomData(char *data, int count) { static uint32_t state = 1234; int i = 0; @@ -1787,7 +1787,7 @@ int main(int argc, char **argv) { } exit(1); } - if (config.cluster_node_count <= 1) { + if (config.cluster_node_count == 0) { fprintf(stderr, "Invalid cluster: %d node(s).\n", config.cluster_node_count); exit(1); diff --git a/src/valkey-check-aof.c b/src/valkey-check-aof.c index cf2888cb90..dfe268201e 100644 --- a/src/valkey-check-aof.c +++ b/src/valkey-check-aof.c @@ -129,7 +129,7 @@ int readArgc(FILE *fp, long *target) { } /* Used to decode a RESP record in the AOF file to obtain the original - * redis command, and also check whether the command is MULTI/EXEC. If the + * server command, and also check whether the command is MULTI/EXEC. If the * command is MULTI, the parameter out_multi will be incremented by one, and * if the command is EXEC, the parameter out_multi will be decremented * by one. The parameter out_multi will be used by the upper caller to determine @@ -426,7 +426,7 @@ int fileIsManifest(char *filepath) { * AOF_RDB_PREAMBLE: Old-style RDB-preamble AOF * AOF_MULTI_PART: manifest in Multi Part AOF * - * redis-check-aof tool will automatically perform different + * valkey-check-aof tool will automatically perform different * verification logic according to different file formats. * */ input_file_type getInputFileType(char *filepath) { @@ -454,7 +454,7 @@ void printAofStyle(int ret, char *aofFileName, char *aofType) { /* Check if Multi Part AOF is valid. It will check the BASE file and INCR files * at once according to the manifest instructions (this is somewhat similar to - * redis' AOF loading). + * the server's AOF loading). * * When the verification is successful, we can guarantee: * 1. The manifest file format is valid diff --git a/src/valkey-check-rdb.c b/src/valkey-check-rdb.c index f938a48664..fcc525dcb3 100644 --- a/src/valkey-check-rdb.c +++ b/src/valkey-check-rdb.c @@ -396,8 +396,8 @@ int redis_check_rdb(char *rdbfilename, FILE *fp) { return 1; } -/* RDB check main: called form server.c when Redis is executed with the - * redis-check-rdb alias, on during RDB loading errors. +/* RDB check main: called from server.c when the server is executed with the + * valkey-check-rdb alias, or during RDB loading errors.
* * The function works in two ways: can be called with argc/argv as a * standalone executable, or called with a non NULL 'fp' argument if we @@ -426,7 +426,7 @@ int redis_check_rdb_main(int argc, char **argv, FILE *fp) { /* In order to call the loading functions we need to create the shared * integer objects, however since this function may be called from - * an already initialized Redis instance, check if we really need to. */ + * an already initialized server instance, check if we really need to. */ if (shared.integers[0] == NULL) createSharedObjects(); server.loading_process_events_interval_bytes = 0; diff --git a/src/valkey-cli.c b/src/valkey-cli.c index ba16d03fa7..cfc3fcaaff 100644 --- a/src/valkey-cli.c +++ b/src/valkey-cli.c @@ -1,4 +1,4 @@ -/* Redis CLI (command line interface) +/* Server CLI (command line interface) * * Copyright (c) 2009-2012, Salvatore Sanfilippo * All rights reserved. @@ -425,10 +425,10 @@ static int helpEntriesLen = 0; /* For backwards compatibility with pre-7.0 servers. * cliLegacyInitHelp() sets up the helpEntries array with the command and group - * names from the commands.c file. However the Redis instance we are connecting + * names from the commands.c file. However the server instance we are connecting * to may support more commands, so this function integrates the previous * entries with additional entries obtained using the COMMAND command - * available in recent versions of Redis. */ + * available in recent versions of the server. */ static void cliLegacyIntegrateHelp(void) { if (cliConnect(CC_QUIET) == REDIS_ERR) return; @@ -986,7 +986,7 @@ static void cliOutputHelp(int argc, char **argv) { if (helpEntries == NULL) { /* Initialize the help using the results of the COMMAND command. - * In case we are using redis-cli help XXX, we need to init it. */ + * In case we are using valkey-cli help XXX, we need to init it. */ cliInitHelp(); } @@ -1602,7 +1602,7 @@ static int cliSelect(void) { return result; } -/* Select RESP3 mode if redis-cli was started with the -3 option. */ +/* Select RESP3 mode if valkey-cli was started with the -3 option. */ static int cliSwitchProto(void) { redisReply *reply; if (!config.resp3 || config.resp2) return REDIS_OK; @@ -1689,7 +1689,7 @@ static int cliConnect(int flags) { } - /* Set aggressive KEEP_ALIVE socket option in the Redis context socket + /* Set aggressive KEEP_ALIVE socket option in the server context socket * in order to prevent timeouts caused by the execution of long * commands. At the same time this improves the detection of real * errors. */ @@ -2315,7 +2315,7 @@ static int cliReadReply(int output_raw_strings) { return REDIS_OK; } -/* Simultaneously wait for pubsub messages from redis and input on stdin. */ +/* Simultaneously wait for pubsub messages from the server and input on stdin. */ static void cliWaitForMessagesOrStdin(void) { int show_info = config.output != OUTPUT_RAW && (isatty(STDOUT_FILENO) || getenv("FAKETTY")); @@ -2337,7 +2337,7 @@ static void cliWaitForMessagesOrStdin(void) { } } while(reply); - /* Wait for input, either on the Redis socket or on stdin. */ + /* Wait for input, either on the server socket or on stdin. 
*/ struct timeval tv; fd_set readfds; FD_ZERO(&readfds); @@ -2366,7 +2366,7 @@ static void cliWaitForMessagesOrStdin(void) { } break; } else if (FD_ISSET(context->fd, &readfds)) { - /* Message from Redis */ + /* Message from the server */ if (cliReadReply(0) != REDIS_OK) { cliPrintContextError(); exit(1); @@ -2409,7 +2409,7 @@ static int cliSendCommand(int argc, char **argv, long repeat) { !strcasecmp(argv[1],"graph")) || (argc == 2 && !strcasecmp(command,"latency") && !strcasecmp(argv[1],"doctor")) || - /* Format PROXY INFO command for Redis Cluster Proxy: + /* Format PROXY INFO command for Cluster Proxy: * https://github.com/artix75/redis-cluster-proxy */ (argc >= 2 && !strcasecmp(command,"proxy") && !strcasecmp(argv[1],"info"))) @@ -2760,7 +2760,7 @@ static int parseOptions(int argc, char **argv) { config.bigkeys = 1; } else if (!strcmp(argv[i],"--memkeys")) { config.memkeys = 1; - config.memkeys_samples = 0; /* use redis default */ + config.memkeys_samples = 0; /* use the server default */ } else if (!strcmp(argv[i],"--memkeys-samples") && !lastarg) { config.memkeys = 1; config.memkeys_samples = atoi(argv[++i]); @@ -3167,10 +3167,10 @@ static int confirmWithYes(char *msg, int ignore_force) { } static int issueCommandRepeat(int argc, char **argv, long repeat) { - /* In Lua debugging mode, we want to pass the "help" to Redis to get + /* In Lua debugging mode, we want to pass the "help" to the server to get * it's own HELP message, rather than handle it by the CLI, see ldbRepl. * - * For the normal Redis HELP, we can process it without a connection. */ + * For the normal server HELP, we can process it without a connection. */ if (!config.eval_ldb && (!strcasecmp(argv[0],"help") || !strcasecmp(argv[0],"?"))) { @@ -3363,9 +3363,9 @@ static void repl(void) { int argc; sds *argv; - /* There is no need to initialize redis HELP when we are in lua debugger mode. + /* There is no need to initialize HELP when we are in lua debugger mode. * It has its own HELP and commands (COMMAND or COMMAND DOCS will fail and got nothing). - * We will initialize the redis HELP after the Lua debugging session ended.*/ + * We will initialize the HELP after the Lua debugging session ended.*/ if ((!config.eval_ldb) && isatty(fileno(stdin))) { /* Initialize the help using the results of the COMMAND command. */ cliInitHelp(); @@ -4109,7 +4109,7 @@ static int clusterManagerNodeConnect(clusterManagerNode *node) { node->context = NULL; return 0; } - /* Set aggressive KEEP_ALIVE socket option in the Redis context socket + /* Set aggressive KEEP_ALIVE socket option in the server context socket * in order to prevent timeouts caused by the execution of long * commands. At the same time this improves the detection of real * errors. */ @@ -4193,7 +4193,7 @@ static void clusterManagerNodeResetSlots(clusterManagerNode *node) { node->slots_count = 0; } -/* Call "INFO" redis command on the specified node and return the reply. */ +/* Call "INFO" command on the specified node and return the reply. */ static redisReply *clusterManagerGetNodeRedisInfo(clusterManagerNode *node, char **err) { @@ -4882,7 +4882,7 @@ static int clusterManagerSetSlotOwner(clusterManagerNode *owner, } /* Get the hash for the values of the specified keys in *keys_reply for the - * specified nodes *n1 and *n2, by calling DEBUG DIGEST-VALUE redis command + * specified nodes *n1 and *n2, by calling DEBUG DIGEST-VALUE command * on both nodes. Every key with same name on both nodes but having different * values will be added to the *diffs list. 
Return 0 in case of reply * error. */ @@ -6873,7 +6873,7 @@ static void clusterManagerPrintNotClusterNodeError(clusterManagerNode *node, clusterManagerLogErr("[ERR] Node %s:%d %s\n", node->ip, node->port, msg); } -/* Execute redis-cli in Cluster Manager mode */ +/* Execute valkey-cli in Cluster Manager mode */ static void clusterManagerMode(clusterManagerCommandProc *proc) { int argc = config.cluster_manager_command.argc; char **argv = config.cluster_manager_command.argv; @@ -8828,7 +8828,7 @@ static void pipeMode(void) { /* The ECHO sequence starts with a "\r\n" so that if there * is garbage in the protocol we read from stdin, the ECHO * will likely still be properly formatted. - * CRLF is ignored by Redis, so it has no effects. */ + * CRLF is ignored by the server, so it has no effects. */ char echo[] = "\r\n*2\r\n$4\r\nECHO\r\n$20\r\n01234567890123456789\r\n"; int j; diff --git a/src/valkeymodule.h b/src/valkeymodule.h index 104d0923d1..18ee75415a 100644 --- a/src/valkeymodule.h +++ b/src/valkeymodule.h @@ -583,7 +583,7 @@ static const ValkeyModuleEvent VALKEYMODULE_EVENT_CRON_LOOP, 1 }, - ValkeyModuleEvent_MasterLinkChange = { + ValkeyModuleEvent_PrimaryLinkChange = { VALKEYMODULE_EVENT_PRIMARY_LINK_CHANGE, 1 }, diff --git a/src/version.h b/src/version.h index b82be5cbe6..3b2e3a0601 100644 --- a/src/version.h +++ b/src/version.h @@ -2,7 +2,7 @@ #define VALKEY_VERSION "255.255.255" #define VALKEY_VERSION_NUM 0x00ffffff -/* Redis compatibility version, should never +/* Redis OSS compatibility version, should never * exceed 7.2.x. */ #define REDIS_VERSION "7.2.4" #define REDIS_VERSION_NUM 0x00070204 diff --git a/src/zipmap.c b/src/zipmap.c index 214c49d2fc..de42a04abc 100644 --- a/src/zipmap.c +++ b/src/zipmap.c @@ -3,11 +3,11 @@ * implementing an O(n) lookup data structure designed to be very memory * efficient. * - * The Redis Hash type uses this data structure for hashes composed of a small + * The Hash type uses this data structure for hashes composed of a small * number of elements, to switch to a hash table once a given number of * elements is reached. * - * Given that many times Redis Hashes are used to represent objects composed + * Given that many times Hashes are used to represent objects composed * of few fields, this is a very big win in terms of used memory. * * -------------------------------------------------------------------------- diff --git a/src/zmalloc.c b/src/zmalloc.c index 65ad1f4c6a..550752240f 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -446,7 +446,7 @@ void zmadvise_dontneed(void *ptr) { /* Get the RSS information in an OS-specific way. * * WARNING: the function zmalloc_get_rss() is not designed to be fast * and may not be called in the busy loops where Redis tries to release + * and may not be called in the busy loops where the server tries to release * memory expiring or swapping out objects. * * For this kind of "fast RSS reporting" usages use instead the @@ -769,7 +769,7 @@ void zlibc_trim(void) { /* For proc_pidinfo() used later in zmalloc_get_smap_bytes_by_field(). * Note that this file cannot be included in zmalloc.h because it includes * a Darwin queue.h file where there is a "LIST_HEAD" macro (!) defined - * conficting with Redis user code. */ + * conflicting with user code.
*/ #include #endif diff --git a/src/zmalloc.h b/src/zmalloc.h index 9baa36b3de..1cf4af96c4 100644 --- a/src/zmalloc.h +++ b/src/zmalloc.h @@ -99,8 +99,8 @@ #include #endif -/* We can enable the Redis defrag capabilities only if we are using Jemalloc - * and the version used is our special version modified for Redis having +/* We can enable the server defrag capabilities only if we are using Jemalloc + * and the version used is our special version modified for the server having * the ability to return per-allocation fragmentation hints. */ #if defined(USE_JEMALLOC) && defined(JEMALLOC_FRAG_HINT) #define HAVE_DEFRAG diff --git a/tests/assets/default.conf b/tests/assets/default.conf index de460cc081..4478f80fca 100644 --- a/tests/assets/default.conf +++ b/tests/assets/default.conf @@ -1,4 +1,4 @@ -# Redis configuration for testing. +# Server configuration for testing. always-show-logo yes notify-keyspace-events KEA diff --git a/tests/assets/test_cli_hint_suite.txt b/tests/assets/test_cli_hint_suite.txt index b4a20163fb..3cebf5229c 100644 --- a/tests/assets/test_cli_hint_suite.txt +++ b/tests/assets/test_cli_hint_suite.txt @@ -1,4 +1,4 @@ -# Test suite for redis-cli command-line hinting mechanism. +# Test suite for valkey-cli command-line hinting mechanism. # Each test case consists of two strings: a (partial) input command line, and the expected hint string. # Command with one arg: GET key diff --git a/tests/cluster/cluster.tcl b/tests/cluster/cluster.tcl index 9931eac849..83168f0918 100644 --- a/tests/cluster/cluster.tcl +++ b/tests/cluster/cluster.tcl @@ -182,8 +182,21 @@ proc cluster_write_test {id} { } # Check if cluster configuration is consistent. +# All the nodes in the cluster should show the same slots configuration and have health +# state "online" to be considered consistent. proc cluster_config_consistent {} { for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} { + # Check if all the nodes are online + set shards_cfg [R $j CLUSTER SHARDS] + foreach shard_cfg $shards_cfg { + set nodes [dict get $shard_cfg nodes] + foreach node $nodes { + if {[dict get $node health] ne "online"} { + return 0 + } + } + } + if {$j == 0} { set base_cfg [R $j cluster slots] } else { @@ -199,7 +212,7 @@ proc cluster_config_consistent {} { # Wait for cluster configuration to propagate and be consistent across nodes. proc wait_for_cluster_propagation {} { - wait_for_condition 50 100 { + wait_for_condition 1000 50 { [cluster_config_consistent] eq 1 } else { fail "cluster config did not reach a consistent state" diff --git a/tests/cluster/run.tcl b/tests/cluster/run.tcl index 86c5f589b7..9808032b50 100644 --- a/tests/cluster/run.tcl +++ b/tests/cluster/run.tcl @@ -5,7 +5,7 @@ cd tests/cluster source cluster.tcl source ../instances.tcl -source ../../support/cluster.tcl ; # Redis Cluster client. +source ../../support/cluster.tcl ; # Cluster client. set ::instances_count 20 ; # How many instances we use at max. set ::tlsdir "../../tls" diff --git a/tests/cluster/tests/04-resharding.tcl b/tests/cluster/tests/04-resharding.tcl index babedd9724..e38b2bc6b5 100644 --- a/tests/cluster/tests/04-resharding.tcl +++ b/tests/cluster/tests/04-resharding.tcl @@ -101,7 +101,7 @@ test "Cluster consistency during live resharding" { set key "key:$listid" incr ele # We write both with Lua scripts and with plain commands.
- # This way we are able to stress Lua -> Redis command invocation + # This way we are able to stress Lua -> server command invocation # as well, that has tests to prevent Lua to write into wrong # hash slots. # We also use both TLS and plaintext connections. @@ -129,7 +129,7 @@ test "Cluster consistency during live resharding" { } test "Verify $numkeys keys for consistency with logical content" { - # Check that the Redis Cluster content matches our logical content. + # Check that the Cluster content matches our logical content. foreach {key value} [array get content] { if {[$cluster lrange $key 0 -1] ne $value} { fail "Key $key expected to hold '$value' but actual content is [$cluster lrange $key 0 -1]" @@ -151,7 +151,7 @@ test "Cluster should eventually be up again" { } test "Verify $numkeys keys after the restart" { - # Check that the Redis Cluster content matches our logical content. + # Check that the Cluster content matches our logical content. foreach {key value} [array get content] { if {[$cluster lrange $key 0 -1] ne $value} { fail "Key $key expected to hold '$value' but actual content is [$cluster lrange $key 0 -1]" diff --git a/tests/cluster/tests/10-manual-failover.tcl b/tests/cluster/tests/10-manual-failover.tcl index 5441b79f38..e93bd680e3 100644 --- a/tests/cluster/tests/10-manual-failover.tcl +++ b/tests/cluster/tests/10-manual-failover.tcl @@ -41,7 +41,7 @@ test "Send CLUSTER FAILOVER to #5, during load" { set key "key:$listid" set ele [randomValue] # We write both with Lua scripts and with plain commands. - # This way we are able to stress Lua -> Redis command invocation + # This way we are able to stress Lua -> server command invocation # as well, that has tests to prevent Lua to write into wrong # hash slots. if {$listid % 2} { @@ -80,7 +80,7 @@ test "Instance #5 is now a master" { } test "Verify $numkeys keys for consistency with logical content" { - # Check that the Redis Cluster content matches our logical content. + # Check that the Cluster content matches our logical content. foreach {key value} [array get content] { assert {[$cluster lrange $key 0 -1] eq $value} } diff --git a/tests/cluster/tests/16-transactions-on-replica.tcl b/tests/cluster/tests/16-transactions-on-replica.tcl index 8bec06ee4a..b16569e104 100644 --- a/tests/cluster/tests/16-transactions-on-replica.tcl +++ b/tests/cluster/tests/16-transactions-on-replica.tcl @@ -58,7 +58,7 @@ test "MULTI-EXEC with write operations is MOVED" { } test "read-only blocking operations from replica" { - set rd [redis_deferring_client redis 1] + set rd [valkey_deferring_client redis 1] $rd readonly $rd read $rd XREAD BLOCK 0 STREAMS k 0 diff --git a/tests/cluster/tests/17-diskless-load-swapdb.tcl b/tests/cluster/tests/17-diskless-load-swapdb.tcl index 7a56ec783b..eae95d3b07 100644 --- a/tests/cluster/tests/17-diskless-load-swapdb.tcl +++ b/tests/cluster/tests/17-diskless-load-swapdb.tcl @@ -51,7 +51,7 @@ test "Main db not affected when fail to diskless load" { # backlog size is very small, and dumping rdb will cost several seconds. set num 10000 set value [string repeat A 1024] - set rd [redis_deferring_client redis $master_id] + set rd [valkey_deferring_client redis $master_id] for {set j 0} {$j < $num} {incr j} { $rd set $j $value } diff --git a/tests/cluster/tests/20-half-migrated-slot.tcl b/tests/cluster/tests/20-half-migrated-slot.tcl index 229b3a86df..6d49716e12 100644 --- a/tests/cluster/tests/20-half-migrated-slot.tcl +++ b/tests/cluster/tests/20-half-migrated-slot.tcl @@ -6,7 +6,7 @@ # 5. 
migration is half finished on "importing" node # TODO: Test is currently disabled until it is stabilized (fixing the test -# itself or real issues in Redis). +# itself or real issues in the server). if {false} { source "../tests/includes/init-tests.tcl" diff --git a/tests/cluster/tests/21-many-slot-migration.tcl b/tests/cluster/tests/21-many-slot-migration.tcl index 1ac73dc997..e4f8eb205b 100644 --- a/tests/cluster/tests/21-many-slot-migration.tcl +++ b/tests/cluster/tests/21-many-slot-migration.tcl @@ -1,7 +1,7 @@ # Tests for many simultaneous migrations. # TODO: Test is currently disabled until it is stabilized (fixing the test -# itself or real issues in Redis). +# itself or real issues in the server). if {false} { diff --git a/tests/cluster/tests/25-pubsubshard-slot-migration.tcl b/tests/cluster/tests/25-pubsubshard-slot-migration.tcl index fd774a8d7b..4f8373b54d 100644 --- a/tests/cluster/tests/25-pubsubshard-slot-migration.tcl +++ b/tests/cluster/tests/25-pubsubshard-slot-migration.tcl @@ -30,7 +30,7 @@ test "Migrate a slot, verify client receives sunsubscribe on primary serving the array set nodefrom [$cluster masternode_for_slot $slot] array set nodeto [$cluster masternode_notfor_slot $slot] - set subscribeclient [redis_deferring_client_by_addr $nodefrom(host) $nodefrom(port)] + set subscribeclient [valkey_deferring_client_by_addr $nodefrom(host) $nodefrom(port)] $subscribeclient deferred 1 $subscribeclient ssubscribe $channelname @@ -64,7 +64,7 @@ test "Client subscribes to multiple channels, migrate a slot, verify client rece array set nodefrom [$cluster masternode_for_slot $slot] array set nodeto [$cluster masternode_notfor_slot $slot] - set subscribeclient [redis_deferring_client_by_addr $nodefrom(host) $nodefrom(port)] + set subscribeclient [valkey_deferring_client_by_addr $nodefrom(host) $nodefrom(port)] $subscribeclient deferred 1 $subscribeclient ssubscribe $channelname @@ -113,7 +113,7 @@ test "Migrate a slot, verify client receives sunsubscribe on replica serving the set replica_addr [get_addr_replica_serving_slot $slot] set replicahost [lindex $replica_addr 0] set replicaport [lindex $replica_addr 1] - set subscribeclient [redis_deferring_client_by_addr $replicahost $replicaport] + set subscribeclient [valkey_deferring_client_by_addr $replicahost $replicaport] $subscribeclient deferred 1 $subscribeclient ssubscribe $channelname @@ -148,7 +148,7 @@ test "Move a replica to another primary, verify client receives sunsubscribe on set replica_host [lindex $replica_addr 0] set replica_port [lindex $replica_addr 1] set replica_client [redis_client_by_addr $replica_host $replica_port] - set subscribeclient [redis_deferring_client_by_addr $replica_host $replica_port] + set subscribeclient [valkey_deferring_client_by_addr $replica_host $replica_port] $subscribeclient deferred 1 $subscribeclient ssubscribe $channelname @@ -174,7 +174,7 @@ test "Delete a slot, verify sunsubscribe message" { array set primary_client [$cluster masternode_for_slot $slot] - set subscribeclient [redis_deferring_client_by_addr $primary_client(host) $primary_client(port)] + set subscribeclient [valkey_deferring_client_by_addr $primary_client(host) $primary_client(port)] $subscribeclient deferred 1 $subscribeclient ssubscribe $channelname $subscribeclient read @@ -195,7 +195,7 @@ test "Reset cluster, verify sunsubscribe message" { array set primary_client [$cluster masternode_for_slot $slot] - set subscribeclient [redis_deferring_client_by_addr $primary_client(host) $primary_client(port)] + set 
subscribeclient [valkey_deferring_client_by_addr $primary_client(host) $primary_client(port)] $subscribeclient deferred 1 $subscribeclient ssubscribe $channelname $subscribeclient read diff --git a/tests/cluster/tests/26-pubsubshard.tcl b/tests/cluster/tests/26-pubsubshard.tcl index 34939acf7c..422c06bf5d 100644 --- a/tests/cluster/tests/26-pubsubshard.tcl +++ b/tests/cluster/tests/26-pubsubshard.tcl @@ -14,9 +14,9 @@ test "Pub/Sub shard basics" { array set notshardnode [$cluster masternode_notfor_slot $slot] set publishclient [redis_client_by_addr $publishnode(host) $publishnode(port)] - set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] - set subscribeclient2 [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] - set anotherclient [redis_deferring_client_by_addr $notshardnode(host) $notshardnode(port)] + set subscribeclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set subscribeclient2 [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set anotherclient [valkey_deferring_client_by_addr $notshardnode(host) $notshardnode(port)] $subscribeclient ssubscribe channel.0 $subscribeclient read @@ -58,7 +58,7 @@ test "client can subscribe to multiple shard channels across different slots in test "sunsubscribe without specifying any channel would unsubscribe all shard channels subscribed" { set publishclient [redis_client_by_addr $publishnode(host) $publishnode(port)] - set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set subscribeclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] set sub_res [ssubscribe $subscribeclient [list "\{channel.0\}1" "\{channel.0\}2" "\{channel.0\}3"]] assert_equal [list 1 2 3] $sub_res @@ -78,9 +78,9 @@ test "Verify Pub/Sub and Pub/Sub shard no overlap" { array set notshardnode [$cluster masternode_notfor_slot $slot] set publishshardclient [redis_client_by_addr $publishnode(host) $publishnode(port)] - set publishclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] - set subscribeshardclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] - set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set publishclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set subscribeshardclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set subscribeclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] $subscribeshardclient deferred 1 $subscribeshardclient ssubscribe channel.0 @@ -109,9 +109,9 @@ test "Verify Pub/Sub and Pub/Sub shard no overlap" { } test "PUBSUB channels/shardchannels" { - set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] - set subscribeclient2 [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] - set subscribeclient3 [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set subscribeclient [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set subscribeclient2 [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] + set subscribeclient3 [valkey_deferring_client_by_addr $publishnode(host) $publishnode(port)] set publishclient [redis_client_by_addr $publishnode(host) $publishnode(port)] ssubscribe $subscribeclient [list "\{channel.0\}1"] diff --git a/tests/cluster/tests/includes/utils.tcl 
b/tests/cluster/tests/includes/utils.tcl index e8925ebfc4..dd477cce84 100644 --- a/tests/cluster/tests/includes/utils.tcl +++ b/tests/cluster/tests/includes/utils.tcl @@ -13,7 +13,7 @@ proc fix_cluster {addr} { if {$code != 0} { puts "redis-cli --cluster fix returns non-zero exit code, output below:\n$result" } - # Note: redis-cli --cluster fix may return a non-zero exit code if nodes don't agree, + # Note: valkey-cli --cluster fix may return a non-zero exit code if nodes don't agree, # but we can ignore that and rely on the check below. assert_cluster_state ok wait_for_condition 100 100 { diff --git a/tests/helpers/fake_redis_node.tcl b/tests/helpers/fake_redis_node.tcl index a12d87fedf..cfcce62401 100644 --- a/tests/helpers/fake_redis_node.tcl +++ b/tests/helpers/fake_redis_node.tcl @@ -1,4 +1,4 @@ -# A fake Redis node for replaying predefined/expected traffic with a client. +# A fake node for replaying predefined/expected traffic with a client. # # Usage: tclsh fake_redis_node.tcl PORT COMMAND REPLY [ COMMAND REPLY [ ... ] ] # diff --git a/tests/instances.tcl b/tests/instances.tcl index 1450d7ee01..74e5491696 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -1,6 +1,6 @@ # Multi-instance test framework. -# This is used in order to test Sentinel and Redis Cluster, and provides -# basic capabilities for spawning and handling N parallel Redis / Sentinel +# This is used in order to test Sentinel and Cluster, and provides +# basic capabilities for spawning and handling N parallel Server / Sentinel # instances. # # Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com @@ -64,7 +64,7 @@ proc exec_instance {type dirname cfgfile} { return $pid } -# Spawn a redis or sentinel instance, depending on 'type'. +# Spawn a server or sentinel instance, depending on 'type'. proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} { for {set j 0} {$j < $count} {incr j} { set port [find_available_port $base_port $::redis_port_count] @@ -87,7 +87,7 @@ proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} { if {$::tls} { if {$::tls_module} { - puts $cfg [format "loadmodule %s/../../../src/redis-tls.so" [pwd]] + puts $cfg [format "loadmodule %s/../../../src/valkey-tls.so" [pwd]] } puts $cfg "tls-port $port" @@ -526,14 +526,14 @@ proc S {n args} { [dict get $s link] {*}$args } -# Returns a Redis instance by index. +# Returns a server instance by index. # Example: # [Rn 0] info proc Rn {n} { return [dict get [lindex $::redis_instances $n] link] } -# Like R but to chat with Redis instances. +# Like R but to chat with server instances. proc R {n args} { [Rn $n] {*}$args } @@ -566,7 +566,7 @@ proc RPort {n} { } } -# Iterate over IDs of sentinel or redis instances. +# Iterate over IDs of sentinel or server instances. 
proc foreach_instance_id {instances idvar code} { upvar 1 $idvar id for {set id 0} {$id < [llength $instances]} {incr id} { @@ -717,14 +717,14 @@ proc restart_instance {type id} { } } -proc redis_deferring_client {type id} { +proc valkey_deferring_client {type id} { set port [get_instance_attrib $type $id port] set host [get_instance_attrib $type $id host] set client [redis $host $port 1 $::tls] return $client } -proc redis_deferring_client_by_addr {host port} { +proc valkey_deferring_client_by_addr {host port} { set client [redis $host $port 1 $::tls] return $client } diff --git a/tests/integration/aof-multi-part.tcl b/tests/integration/aof-multi-part.tcl index 383dbeb73e..c642232044 100644 --- a/tests/integration/aof-multi-part.tcl +++ b/tests/integration/aof-multi-part.tcl @@ -19,9 +19,9 @@ tags {"external:skip"} { # Test Part 1 - # In order to test the loading logic of redis under different combinations of manifest and AOF. - # We will manually construct the manifest file and AOF, and then start redis to verify whether - # the redis behavior is as expected. + # In order to test the loading logic of the server under different combinations of manifest and AOF, + # we will manually construct the manifest file and AOF, and then start the server to verify whether + # the server behavior is as expected. test {Multi Part AOF can't load data when some file missing} { create_aof $aof_dirpath $aof_base1_file { @@ -748,8 +748,8 @@ tags {"external:skip"} { # Test Part 2 # - # To test whether the AOFRW behaves as expected during the redis run. - # We will start redis first, then perform pressure writing, enable and disable AOF, and manually + # To test whether the AOFRW behaves as expected during the server run, + # we will start the server first, then perform pressure writing, enable and disable AOF, and manually # and automatically run bgrewrite and other actions, to test whether the correct AOF file is created, # whether the correct manifest is generated, whether the data can be reload correctly under continuous # writing pressure, etc.
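The Multi Part AOF tests above hand-build manifest files before starting the server, so it helps to keep the manifest's one-record-per-line shape in mind; a line is a flat key/value sequence along the lines of "file appendonly.aof.1.incr.aof seq 1 type i". The C sketch below only illustrates parsing that shape; the struct, field and function names are invented for this example and are not taken from the server's actual loader.

/* Illustrative only: parse one manifest-style line of the assumed form
 * "file <name> seq <n> type <b|i|h>" into a small struct. Filenames with
 * spaces or special characters are out of scope for this sketch. */
#include <stdio.h>
#include <string.h>

typedef struct {
    char name[256];   /* AOF file name */
    long long seq;    /* sequence number within its type */
    char type;        /* 'b' = BASE, 'i' = INCR, 'h' = HISTORY (assumed here) */
} manifest_entry;

static int parse_manifest_line(const char *line, manifest_entry *e) {
    char typebuf[8] = {0};
    if (sscanf(line, "file %255s seq %lld type %7s", e->name, &e->seq, typebuf) != 3)
        return -1;                      /* malformed or truncated line */
    if (strlen(typebuf) != 1) return -1; /* type marker must be a single character */
    e->type = typebuf[0];
    return 0;
}

int main(void) {
    manifest_entry e;
    if (parse_manifest_line("file appendonly.aof.1.incr.aof seq 1 type i", &e) == 0)
        printf("name=%s seq=%lld type=%c\n", e.name, e.seq, e.type);
    return 0;
}

A small helper like this can be convenient when adding new test cases that need to assert on a manifest the test itself just generated.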
diff --git a/tests/integration/aof.tcl b/tests/integration/aof.tcl index 6006a00ff9..7218419c12 100644 --- a/tests/integration/aof.tcl +++ b/tests/integration/aof.tcl @@ -210,7 +210,7 @@ tags {"aof external:skip"} { start_server {overrides {appendonly {yes} appendfsync always}} { test {AOF fsync always barrier issue} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # Set a sleep when aof is flushed, so that we have a chance to look # at the aof size and detect if the response of an incr command # arrives before the data was written (and hopefully fsynced) @@ -374,7 +374,7 @@ tags {"aof external:skip"} { } } - # redis could load AOF which has timestamp annotations inside + # The server could load AOF which has timestamp annotations inside create_aof $aof_dirpath $aof_file { append_to_aof "#TS:1628217470\r\n" append_to_aof [formatCommand set foo1 bar1] @@ -438,7 +438,7 @@ tags {"aof external:skip"} { append_to_aof [formatCommand select 9] append_to_aof [formatCommand eval {redis.call('set',KEYS[1],'y'); for i=1,1500000 do redis.call('ping') end return 'ok'} 1 x] } - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd debug loadaof $rd flush wait_for_condition 100 10 { diff --git a/tests/integration/corrupt-dump-fuzzer.tcl b/tests/integration/corrupt-dump-fuzzer.tcl index 206e503fc7..65f5c673dc 100644 --- a/tests/integration/corrupt-dump-fuzzer.tcl +++ b/tests/integration/corrupt-dump-fuzzer.tcl @@ -5,7 +5,7 @@ tags {"dump" "corruption" "external:skip" "logreqres:skip"} { # catch sigterm so that in case one of the random command hangs the test, -# usually due to redis not putting a response in the output buffers, +# usually due to the server not putting a response in the output buffers, # we'll know which command it was if { ! [ catch { package require Tclx @@ -14,7 +14,7 @@ if { ! [ catch { } proc generate_collections {suffix elements} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] for {set j 0} {$j < $elements} {incr j} { # add both string values and integers if {$j % 2 == 0} {set val $j} else {set val "_$j"} diff --git a/tests/integration/dismiss-mem.tcl b/tests/integration/dismiss-mem.tcl index 87f6e1dfc9..6860e52f61 100644 --- a/tests/integration/dismiss-mem.tcl +++ b/tests/integration/dismiss-mem.tcl @@ -61,7 +61,7 @@ start_server {tags {"dismiss external:skip"}} { for {set i 0} {$i < 100} {incr i} { r lpush mylist $item } - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd lrange mylist 0 -1 $rd flush after 100 @@ -74,7 +74,7 @@ start_server {tags {"dismiss external:skip"}} { test {dismiss client query buffer} { # Big pending query buffer set bigstr [string repeat A 8192] - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd write "*2\r\n\$8192\r\n" $rd write $bigstr\r\n $rd flush diff --git a/tests/integration/failover.tcl b/tests/integration/failover.tcl index 21fa3d2815..70bb66284d 100644 --- a/tests/integration/failover.tcl +++ b/tests/integration/failover.tcl @@ -256,7 +256,7 @@ start_server {overrides {save {}}} { # We pause the target long enough to send a write command # during the pause. This write will not be interrupted. 
pause_process [srv -1 pid] - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd SET FOO BAR $node_0 failover to $node_1_host $node_1_port resume_process [srv -1 pid] diff --git a/tests/integration/logging.tcl b/tests/integration/logging.tcl index b547cd8fab..06b2e24e58 100644 --- a/tests/integration/logging.tcl +++ b/tests/integration/logging.tcl @@ -8,7 +8,7 @@ if {$system_name eq {linux}} { } # look for the DEBUG command in the backtrace, used when we triggered -# a stack trace print while we know redis is running that command. +# a stack trace print while we know the server is running that command. proc check_log_backtrace_for_debug {log_pattern} { # search for the final line in the stacktraces generation to make sure it was completed. set pattern "* STACK TRACE DONE *" @@ -17,7 +17,7 @@ proc check_log_backtrace_for_debug {log_pattern} { set res [wait_for_log_messages 0 \"$log_pattern\" 0 100 100] if {$::verbose} { puts $res} - # If the stacktrace is printed more than once, it means redis crashed during crash report generation + # If the stacktrace is printed more than once, it means the server crashed during crash report generation assert_equal [count_log_message 0 "STACK TRACE -"] 1 upvar threads_mngr_supported threads_mngr_supported @@ -30,7 +30,7 @@ proc check_log_backtrace_for_debug {log_pattern} { # the following are skipped since valgrind is slow and a timeout can happen if {!$::valgrind} { assert_equal [count_log_message 0 "wait_threads(): waiting threads timed out"] 0 - # make sure redis prints stack trace for all threads. we know 3 threads are idle in bio.c + # make sure the server prints stack trace for all threads. we know 3 threads are idle in bio.c assert_equal [count_log_message 0 "bioProcessBackgroundJobs"] 3 } } @@ -55,7 +55,7 @@ if {$backtrace_supported} { r debug sleep 1 check_log_backtrace_for_debug "*WATCHDOG TIMER EXPIRED*" - # make sure redis is still alive + # make sure the server is still alive assert_equal "PONG" [r ping] } } @@ -77,7 +77,7 @@ if {!$::valgrind} { r deferred 1 r debug sleep 10 ;# so that we see the function in the stack trace r flush - after 100 ;# wait for redis to get into the sleep + after 100 ;# wait for the server to get into the sleep exec kill -SIGABRT $pid $check_cb "*crashed by signal*" } @@ -100,12 +100,12 @@ if {!$::valgrind} { r deferred 1 r debug sleep 10 ;# so that we see the function in the stack trace r flush - after 100 ;# wait for redis to get into the sleep + after 100 ;# wait for the server to get into the sleep exec kill -SIGALRM $pid $check_cb "*Received SIGALRM*" r read r deferred 0 - # make sure redis is still alive + # make sure the server is still alive assert_equal "PONG" [r ping] } } diff --git a/tests/integration/psync2-pingoff.tcl b/tests/integration/psync2-pingoff.tcl index 3589d07e75..4ea70f0c32 100644 --- a/tests/integration/psync2-pingoff.tcl +++ b/tests/integration/psync2-pingoff.tcl @@ -1,5 +1,5 @@ # These tests were added together with the meaningful offset implementation -# in redis 6.0.0, which was later abandoned in 6.0.4, they used to test that +# in redis OSS 6.0.0, which was later abandoned in 6.0.4, they used to test that # servers are able to PSYNC with replicas even if the replication stream has # PINGs at the end which present in one sever and missing on another. 
# We keep these tests just because they reproduce edge cases in the replication diff --git a/tests/integration/psync2-reg.tcl b/tests/integration/psync2-reg.tcl index a803d82a8a..53a45cf57c 100644 --- a/tests/integration/psync2-reg.tcl +++ b/tests/integration/psync2-reg.tcl @@ -1,7 +1,7 @@ # Issue 3899 regression test. # We create a chain of three instances: master -> slave -> slave2 # and continuously break the link while traffic is generated by -# redis-benchmark. At the end we check that the data is the same +# valkey-benchmark. At the end we check that the data is the same # everywhere. start_server {tags {"psync2 external:skip"}} { diff --git a/tests/integration/psync2.tcl b/tests/integration/psync2.tcl index 4abe059b1a..69d536601c 100644 --- a/tests/integration/psync2.tcl +++ b/tests/integration/psync2.tcl @@ -77,7 +77,7 @@ start_server {} { start_server {} { set master_id 0 ; # Current master set start_time [clock seconds] ; # Test start time - set counter_value 0 ; # Current value of the Redis counter "x" + set counter_value 0 ; # Current value of the server counter "x" # Config set debug_msg 0 ; # Enable additional debug messages diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl index 106fc0b50c..e3f92bf521 100644 --- a/tests/integration/rdb.tcl +++ b/tests/integration/rdb.tcl @@ -187,7 +187,7 @@ test {client freed during loading} { # connect and disconnect 5 clients set clients {} for {set j 0} {$j < 5} {incr j} { - lappend clients [redis_deferring_client] + lappend clients [valkey_deferring_client] } foreach rd $clients { $rd debug log bla @@ -255,14 +255,14 @@ start_server {overrides {save ""}} { assert {[s rdb_last_cow_size] == 0} # using a 200us delay, the bgsave is empirically taking about 10 seconds. - # we need it to take more than some 5 seconds, since redis only report COW once a second. + # we need it to take more than some 5 seconds, since the server only report COW once a second. 
r config set rdb-key-save-delay 200 r config set loglevel debug # populate the db with 10k keys of 512B each (since we want to measure the COW size by # changing some keys and read the reported COW size, we are using small key size to prevent from # the "dismiss mechanism" free memory and reduce the COW size) - set rd [redis_deferring_client 0] + set rd [valkey_deferring_client 0] set size 500 ;# aim for the 512 bin (sds overhead) set cmd_count 10000 for {set k 0} {$k < $cmd_count} {incr k} { diff --git a/tests/integration/replication-buffer.tcl b/tests/integration/replication-buffer.tcl index 64b26ca021..c57b86897b 100644 --- a/tests/integration/replication-buffer.tcl +++ b/tests/integration/replication-buffer.tcl @@ -294,7 +294,7 @@ test {Replica client-output-buffer size is limited to backlog_limit/16 when no r # Before this fix (#11905), the test would trigger an assertion in 'o->used >= c->ref_block_pos' test {The update of replBufBlock's repl_offset is ok - Regression test for #11666} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set replid [status $master master_replid] set offset [status $master repl_backlog_first_byte_offset] $rd psync $replid $offset diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index cbc5fb4a29..44ed3c69e1 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -108,7 +108,7 @@ start_server {tags {"repl external:skip"}} { test {BRPOPLPUSH replication, when blocking against empty list} { $A config resetstat - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd brpoplpush a b 5 r lpush a foo wait_for_condition 50 100 { @@ -122,7 +122,7 @@ start_server {tags {"repl external:skip"}} { test {BRPOPLPUSH replication, list exists} { $A config resetstat - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r lpush c 1 r lpush c 2 r lpush c 3 @@ -137,7 +137,7 @@ start_server {tags {"repl external:skip"}} { foreach whereto {left right} { test "BLMOVE ($wherefrom, $whereto) replication, when blocking against empty list" { $A config resetstat - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blmove a b $wherefrom $whereto 5 r lpush a foo wait_for_condition 50 100 { @@ -151,7 +151,7 @@ start_server {tags {"repl external:skip"}} { test "BLMOVE ($wherefrom, $whereto) replication, list exists" { $A config resetstat - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r lpush c 1 r lpush c 2 r lpush c 3 @@ -165,7 +165,7 @@ start_server {tags {"repl external:skip"}} { } test {BLPOP followed by role change, issue #2473} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blpop foo 0 ; # Block while B is a master # Turn B into master of A @@ -645,7 +645,7 @@ foreach testType {Successful Aborted} { } test {Busy script during async loading} { - set rd_replica [redis_deferring_client -1] + set rd_replica [valkey_deferring_client -1] $replica config set lua-time-limit 10 $rd_replica eval {while true do end} 0 after 200 @@ -1155,7 +1155,7 @@ test {replicaof right after disconnection} { fail "Can't turn the instance into a replica" } - set rd [redis_deferring_client -1] + set rd [valkey_deferring_client -1] $rd debug sleep 1 after 100 @@ -1353,7 +1353,7 @@ test {replica can handle EINTR if use diskless load} { start_server {tags {"repl" "external:skip"}} { test "replica do not write the reply to the replication link - SYNC (_addReplyToBufferOrList)" { - set rd [redis_deferring_client] + set rd 
[valkey_deferring_client] set lines [count_log_lines 0] $rd sync @@ -1370,7 +1370,7 @@ start_server {tags {"repl" "external:skip"}} { } test "replica do not write the reply to the replication link - SYNC (addReplyDeferredLen)" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set lines [count_log_lines 0] $rd sync @@ -1387,7 +1387,7 @@ start_server {tags {"repl" "external:skip"}} { } test "replica do not write the reply to the replication link - PSYNC (_addReplyToBufferOrList)" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set lines [count_log_lines 0] $rd psync replicationid -1 @@ -1407,7 +1407,7 @@ start_server {tags {"repl" "external:skip"}} { } test "replica do not write the reply to the replication link - PSYNC (addReplyDeferredLen)" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set lines [count_log_lines 0] $rd psync replicationid -1 diff --git a/tests/integration/shutdown.tcl b/tests/integration/shutdown.tcl index b2ec32cbd1..b2fdb845a3 100644 --- a/tests/integration/shutdown.tcl +++ b/tests/integration/shutdown.tcl @@ -56,7 +56,7 @@ foreach how {sigterm shutdown} { exec kill -SIGTERM $master_pid } shutdown { - set rd [redis_deferring_client -1] + set rd [valkey_deferring_client -1] $rd shutdown } } @@ -152,8 +152,8 @@ test "Shutting down master waits for replica then fails" { $master incr k # Two clients call blocking SHUTDOWN in parallel. - set rd1 [redis_deferring_client -1] - set rd2 [redis_deferring_client -1] + set rd1 [valkey_deferring_client -1] + set rd2 [valkey_deferring_client -1] $rd1 shutdown $rd2 shutdown set info_clients [$master info clients] @@ -205,8 +205,8 @@ test "Shutting down master waits for replica then aborted" { $master incr k # Two clients call blocking SHUTDOWN in parallel. - set rd1 [redis_deferring_client -1] - set rd2 [redis_deferring_client -1] + set rd1 [valkey_deferring_client -1] + set rd2 [valkey_deferring_client -1] $rd1 shutdown $rd2 shutdown set info_clients [$master info clients] diff --git a/tests/integration/valkey-benchmark.tcl b/tests/integration/valkey-benchmark.tcl index c3254408c8..e62985312d 100644 --- a/tests/integration/valkey-benchmark.tcl +++ b/tests/integration/valkey-benchmark.tcl @@ -5,7 +5,7 @@ proc cmdstat {cmd} { return [cmdrstat $cmd r] } -# common code to reset stats, flush the db and run redis-benchmark +# common code to reset stats, flush the db and run valkey-benchmark proc common_bench_setup {cmd} { r config resetstat r flushall diff --git a/tests/integration/valkey-cli.tcl b/tests/integration/valkey-cli.tcl index 19b6e00bae..4a6a2b314c 100644 --- a/tests/integration/valkey-cli.tcl +++ b/tests/integration/valkey-cli.tcl @@ -360,7 +360,7 @@ start_server {tags {"cli"}} { if {!$::tls} { ;# fake_redis_node doesn't support TLS test_nontty_cli "ASK redirect test" { - # Set up two fake Redis nodes. + # Set up two fake nodes. set tclsh [info nameofexecutable] set script "tests/helpers/fake_redis_node.tcl" set port1 [find_available_port $::baseport $::portcount] diff --git a/tests/modules/aclcheck.c b/tests/modules/aclcheck.c index b746518043..28ea1a5630 100644 --- a/tests/modules/aclcheck.c +++ b/tests/modules/aclcheck.c @@ -226,7 +226,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) if (argc > 1) return RedisModule_WrongArity(ctx); /* When that flag is passed, we try to create too many categories, - * and the test expects this to fail. In this case redis returns REDISMODULE_ERR + * and the test expects this to fail. 
In this case the server returns REDISMODULE_ERR * and set errno to ENOMEM*/ if (argc == 1) { long long fail_flag = 0; @@ -289,14 +289,14 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) return REDISMODULE_ERR; /* This validates that, when module tries to add a category with invalid characters, - * redis returns REDISMODULE_ERR and set errno to `EINVAL` */ + * the server returns REDISMODULE_ERR and set errno to `EINVAL` */ if (RedisModule_AddACLCategory(ctx,"!nval!dch@r@cter$") == REDISMODULE_ERR) RedisModule_Assert(errno == EINVAL); else return REDISMODULE_ERR; /* This validates that, when module tries to add a category that already exists, - * redis returns REDISMODULE_ERR and set errno to `EBUSY` */ + * the server returns REDISMODULE_ERR and set errno to `EBUSY` */ if (RedisModule_AddACLCategory(ctx,"write") == REDISMODULE_ERR) RedisModule_Assert(errno == EBUSY); else diff --git a/tests/modules/auth.c b/tests/modules/auth.c index 19be95a0ab..5d6b740929 100644 --- a/tests/modules/auth.c +++ b/tests/modules/auth.c @@ -220,8 +220,8 @@ int test_rm_register_blocking_auth_cb(RedisModuleCtx *ctx, RedisModuleString **a return REDISMODULE_OK; } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { REDISMODULE_NOT_USED(argv); REDISMODULE_NOT_USED(argc); diff --git a/tests/modules/basics.c b/tests/modules/basics.c index 897cb5d87e..33086c3024 100644 --- a/tests/modules/basics.c +++ b/tests/modules/basics.c @@ -1,4 +1,4 @@ -/* Module designed to test the Redis modules subsystem. +/* Module designed to test the modules subsystem. 
* * ----------------------------------------------------------------------------- * diff --git a/tests/modules/blockedclient.c b/tests/modules/blockedclient.c index 4a59623fdc..c3a354670e 100644 --- a/tests/modules/blockedclient.c +++ b/tests/modules/blockedclient.c @@ -16,7 +16,7 @@ static volatile int g_slow_bg_operation = 0; static volatile int g_is_in_slow_bg_operation = 0; void *sub_worker(void *arg) { - // Get Redis module context + // Get module context RedisModuleCtx *ctx = (RedisModuleCtx *)arg; // Try acquiring GIL @@ -32,7 +32,7 @@ void *worker(void *arg) { // Retrieve blocked client RedisModuleBlockedClient *bc = (RedisModuleBlockedClient *)arg; - // Get Redis module context + // Get module context RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bc); // Acquire GIL @@ -55,7 +55,7 @@ void *worker(void *arg) { // Unblock client RedisModule_UnblockClient(bc, NULL); - // Free the Redis module context + // Free the module context RedisModule_FreeThreadSafeContext(ctx); return NULL; @@ -104,7 +104,7 @@ void *bg_call_worker(void *arg) { bg_call_data *bg = arg; RedisModuleBlockedClient *bc = bg->bc; - // Get Redis module context + // Get module context RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bg->bc); // Acquire GIL @@ -156,7 +156,7 @@ void *bg_call_worker(void *arg) { // Unblock client RedisModule_UnblockClient(bc, NULL); - // Free the Redis module context + // Free the module context RedisModule_FreeThreadSafeContext(ctx); return NULL; @@ -616,7 +616,7 @@ static void timer_callback(RedisModuleCtx *ctx, void *data) RedisModuleBlockedClient *bc = data; - // Get Redis module context + // Get module context RedisModuleCtx *reply_ctx = RedisModule_GetThreadSafeContext(bc); // Reply to client @@ -625,7 +625,7 @@ static void timer_callback(RedisModuleCtx *ctx, void *data) // Unblock client RedisModule_UnblockClient(bc, NULL); - // Free the Redis module context + // Free the module context RedisModule_FreeThreadSafeContext(reply_ctx); } diff --git a/tests/modules/blockonkeys.c b/tests/modules/blockonkeys.c index 94bb361231..c7b4797fd0 100644 --- a/tests/modules/blockonkeys.c +++ b/tests/modules/blockonkeys.c @@ -10,7 +10,7 @@ #define LIST_SIZE 1024 /* The FSL (Fixed-Size List) data type is a low-budget imitation of the - * native Redis list, in order to test list-like commands implemented + * list data type, in order to test list-like commands implemented * by a module. * Examples: FSL.PUSH, FSL.BPOP, etc. */ diff --git a/tests/modules/cmdintrospection.c b/tests/modules/cmdintrospection.c index 1a5e4863b6..3a1870a3f3 100644 --- a/tests/modules/cmdintrospection.c +++ b/tests/modules/cmdintrospection.c @@ -29,7 +29,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) .tips = "nondeterministic_output", .history = (RedisModuleCommandHistoryEntry[]){ /* NOTE: All versions specified should be the module's versions, not - * Redis'! We use Redis versions in this example for the purpose of + * the server's! We use server versions in this example for the purpose of * testing (comparing the output with the output of the vanilla * XADD). 
*/ {"6.2.0", "Added the `NOMKSTREAM` option, `MINID` trimming strategy and the `LIMIT` option."}, diff --git a/tests/modules/datatype2.c b/tests/modules/datatype2.c index bc0dc3dfeb..fbfafd6f6d 100644 --- a/tests/modules/datatype2.c +++ b/tests/modules/datatype2.c @@ -45,7 +45,7 @@ * dict * * - * Keys in redis database: + * Keys in server database: * * ┌───────┐ * │ size │ @@ -64,7 +64,7 @@ * │ k3 │ ───┼─┐ │ k2 │ ───┼─┐ * │ │ │ │ │ │ │ │ * └─────┴─────┘ │ ┌───────┐ └─────┴─────┘ │ ┌───────┐ - * redis db[0] │ │ size │ redis db[1] │ │ size │ + * server db[0] │ │ size │ server db[1] │ │ size │ * └───────────►│ used │ └───────────►│ used │ * │ mask │ │ mask │ * └───────┘ └───────┘ diff --git a/tests/modules/hooks.c b/tests/modules/hooks.c index fc357d1445..24449d0d34 100644 --- a/tests/modules/hooks.c +++ b/tests/modules/hooks.c @@ -400,8 +400,8 @@ static int cmdKeyExpiry(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) return REDISMODULE_OK; } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { #define VerifySubEventSupported(e, s) \ if (!RedisModule_IsSubEventSupported(e, s)) { \ @@ -422,7 +422,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) RedisModule_SubscribeToServerEvent(ctx, RedisModuleEvent_ReplicaChange, replicationChangeCallback); RedisModule_SubscribeToServerEvent(ctx, - RedisModuleEvent_MasterLinkChange, rasterLinkChangeCallback); + RedisModuleEvent_PrimaryLinkChange, rasterLinkChangeCallback); /* persistence related hooks */ RedisModule_SubscribeToServerEvent(ctx, diff --git a/tests/modules/keyspace_events.c b/tests/modules/keyspace_events.c index 1a284b50f4..d60d8a6934 100644 --- a/tests/modules/keyspace_events.c +++ b/tests/modules/keyspace_events.c @@ -316,8 +316,8 @@ static int cmdGetDels(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { return RedisModule_ReplyWithLongLong(ctx, dels); } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. */ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { if (RedisModule_Init(ctx,"testkeyspace",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR){ return REDISMODULE_ERR; diff --git a/tests/modules/postnotifications.c b/tests/modules/postnotifications.c index 770711bc3f..52baec2a5c 100644 --- a/tests/modules/postnotifications.c +++ b/tests/modules/postnotifications.c @@ -244,8 +244,8 @@ static void KeySpace_ServerEventCallback(RedisModuleCtx *ctx, RedisModuleEvent e if (res == REDISMODULE_ERR) KeySpace_ServerEventPostNotificationFree(pn_ctx); } -/* This function must be present on each Redis module. It is used in order to - * register the commands into the Redis server. */ +/* This function must be present on each module. It is used in order to + * register the commands into the server. 
*/ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { REDISMODULE_NOT_USED(argv); REDISMODULE_NOT_USED(argc); diff --git a/tests/modules/test_lazyfree.c b/tests/modules/test_lazyfree.c index 7ba213ff86..d47cc7dbd8 100644 --- a/tests/modules/test_lazyfree.c +++ b/tests/modules/test_lazyfree.c @@ -166,7 +166,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) if (RedisModule_Init(ctx,"lazyfreetest",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR; - /* We only allow our module to be loaded when the redis core version is greater than the version of my module */ + /* We only allow our module to be loaded when the core version is greater than the version of my module */ if (RedisModule_GetTypeMethodVersion() < REDISMODULE_TYPE_METHOD_VERSION) { return REDISMODULE_ERR; } diff --git a/tests/modules/usercall.c b/tests/modules/usercall.c index 316de1eea0..4add315d5b 100644 --- a/tests/modules/usercall.c +++ b/tests/modules/usercall.c @@ -117,7 +117,7 @@ void *bg_call_worker(void *arg) { bg_call_data *bg = arg; RedisModuleBlockedClient *bc = bg->bc; - // Get Redis module context + // Get module context RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bg->bc); // Acquire GIL @@ -157,7 +157,7 @@ void *bg_call_worker(void *arg) { // Unblock client RedisModule_UnblockClient(bc, NULL); - // Free the Redis module context + // Free the module context RedisModule_FreeThreadSafeContext(ctx); return NULL; diff --git a/tests/sentinel/tests/10-replica-priority.tcl b/tests/sentinel/tests/10-replica-priority.tcl index d3f868afab..17a59ed66a 100644 --- a/tests/sentinel/tests/10-replica-priority.tcl +++ b/tests/sentinel/tests/10-replica-priority.tcl @@ -24,7 +24,7 @@ test "Check acceptable replica-priority values" { fail "Able to set replica-announced with something else than yes or no (a3b2c1) whereas it should not be possible" } - # test only the first redis replica, no need to double test + # test only the first replica, no need to double test break } } diff --git a/tests/support/cluster.tcl b/tests/support/cluster.tcl index 081ef6a952..f6ae76f2f8 100644 --- a/tests/support/cluster.tcl +++ b/tests/support/cluster.tcl @@ -1,4 +1,4 @@ -# Tcl redis cluster client as a wrapper of redis.rb. +# Tcl cluster client as a wrapper of redis.rb. # Copyright (C) 2014 Salvatore Sanfilippo # Released under the BSD license like Redis itself # @@ -57,7 +57,7 @@ proc redis_cluster {nodes {tls -1}} { # maps ::redis_cluster::slots($id) with an hash mapping slot numbers # to node IDs. # -# This function is called when a new Redis Cluster client is initialized +# This function is called when a new Cluster client is initialized # and every time we get a -MOVED redirection error. proc ::redis_cluster::__method__refresh_nodes_map {id} { # Contact the first responding startup node. @@ -258,7 +258,7 @@ proc ::redis_cluster::__dispatch__ {id method args} { proc ::redis_cluster::get_keys_from_command {cmd argv} { set cmd [string tolower $cmd] - # Most Redis commands get just one key as first argument. + # Most commands get just one key as first argument. if {[lsearch -exact $::redis_cluster::plain_commands $cmd] != -1} { return [list [lindex $argv 0]] } @@ -276,7 +276,7 @@ proc ::redis_cluster::get_keys_from_command {cmd argv} { } # Returns the CRC16 of the specified string. -# The CRC parameters are described in the Redis Cluster specification. +# The CRC parameters are described in the Cluster specification. 
set ::redis_cluster::XMODEMCRC16Lookup { 0x0000 0x1021 0x2042 0x3063 0x4084 0x50a5 0x60c6 0x70e7 0x8108 0x9129 0xa14a 0xb16b 0xc18c 0xd1ad 0xe1ce 0xf1ef @@ -323,7 +323,7 @@ proc ::redis_cluster::crc16 {s} { } # Hash a single key returning the slot it belongs to, Implemented hash -# tags as described in the Redis Cluster specification. +# tags as described in the Cluster specification. proc ::redis_cluster::hash {key} { set keylen [string length $key] set s {} @@ -352,7 +352,7 @@ # Return the slot the specified keys hash to. # If the keys hash to multiple slots, an empty string is returned to -# signal that the command can't be run in Redis Cluster. +# signal that the command can't be run in Cluster. proc ::redis_cluster::get_slot_from_keys {keys} { set slot {} foreach k $keys { diff --git a/tests/support/cluster_util.tcl b/tests/support/cluster_util.tcl index 5160466474..d89a5a384d 100644 --- a/tests/support/cluster_util.tcl +++ b/tests/support/cluster_util.tcl @@ -1,8 +1,21 @@ # Cluster helper functions # Check if cluster configuration is consistent. +# All the nodes in the cluster should show the same slots configuration and have health +# state "online" to be considered consistent. proc cluster_config_consistent {} { for {set j 0} {$j < [llength $::servers]} {incr j} { + # Check if all the nodes are online + set shards_cfg [R $j CLUSTER SHARDS] + foreach shard_cfg $shards_cfg { + set nodes [dict get $shard_cfg nodes] + foreach node $nodes { + if {[dict get $node health] ne "online"} { + return 0 + } + } + } + if {$j == 0} { set base_cfg [R $j cluster slots] } else { @@ -27,7 +40,7 @@ proc cluster_size_consistent {cluster_size} { # Wait for cluster configuration to propagate and be consistent across nodes. proc wait_for_cluster_propagation {} { - wait_for_condition 50 100 { + wait_for_condition 1000 50 { [cluster_config_consistent] eq 1 } else { fail "cluster config did not reach a consistent state" diff --git a/tests/support/redis.tcl b/tests/support/redis.tcl index 53fa9fe915..bc106ffd68 100644 --- a/tests/support/redis.tcl +++ b/tests/support/redis.tcl @@ -1,4 +1,4 @@ -# Tcl client library - used by the Redis test +# Tcl client library - used by the server test # Copyright (C) 2009-2014 Salvatore Sanfilippo # Released under the BSD license like Redis itself # diff --git a/tests/support/response_transformers.tcl b/tests/support/response_transformers.tcl index c49bbb15c3..eeba2ff3bb 100644 --- a/tests/support/response_transformers.tcl +++ b/tests/support/response_transformers.tcl @@ -1,4 +1,4 @@ -# Tcl client library - used by the Redis test +# Tcl client library - used by the server test # Copyright (C) 2009-2023 Redis Ltd.
# Released under the BSD license like Redis itself # diff --git a/tests/support/server.tcl b/tests/support/server.tcl index a4af3db174..c28444950c 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -480,7 +480,7 @@ proc start_server {options {code undefined}} { set config {} if {$::tls} { if {$::tls_module} { - lappend config_lines [list "loadmodule" [format "%s/src/redis-tls.so" [pwd]]] + lappend config_lines [list "loadmodule" [format "%s/src/valkey-tls.so" [pwd]]] } dict set config "tls-cert-file" [format "%s/tests/tls/server.crt" [pwd]] dict set config "tls-key-file" [format "%s/tests/tls/server.key" [pwd]] diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 64f5eaa52c..cba7e72b5b 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -542,7 +542,7 @@ proc find_valgrind_errors {stderr on_termination} { return "" } - # Look for the absence of a leak free summary (happens when redis isn't terminated properly). + # Look for the absence of a leak free summary (happens when the server isn't terminated properly). if {(![regexp -- {definitely lost: 0 bytes} $buf] && ![regexp -- {no leaks are possible} $buf])} { return $buf @@ -552,7 +552,7 @@ } # Execute a background process writing random data for the specified number -# of seconds to the specified Redis instance. +# of seconds to the specified server instance. proc start_write_load {host port seconds} { set tclsh [info nameofexecutable] exec $tclsh tests/helpers/gen_write_load.tcl $host $port $seconds $::tls 0 & @@ -596,7 +596,7 @@ proc lshuffle {list} { } # Execute a background process writing complex data for the specified number -# of ops to the specified Redis instance. +# of ops to the specified server instance. proc start_bg_complex_data {host port db ops} { set tclsh [info nameofexecutable] exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops $::tls & @@ -609,7 +609,7 @@ proc stop_bg_complex_data {handle} { # Write num keys with the given key prefix and value size (in bytes). If idx is # given, it's the index (AKA level) used with the srv procedure and it specifies -# to which Redis instance to write the keys. +# to which server instance to write the keys. proc populate {num {prefix key:} {size 3} {idx 0} {prints false} {expires 0}} { r $idx deferred 1 if {$num > 16} {set pipeline 16} else {set pipeline $num} @@ -713,11 +713,11 @@ proc generate_fuzzy_traffic_on_key {key duration} { # find a random command for our key type set cmd_idx [expr {int(rand()*[llength $cmds])}] set cmd [lindex $cmds $cmd_idx] - # get the command details from redis + # get the command details from the server if { [ catch { set cmd_info [lindex [r command info $cmd] 0] } err ] } { - # if we failed, it means redis crashed after the previous command + # if we failed, it means the server crashed after the previous command return $sent } # try to build a valid command argument @@ -1030,7 +1030,7 @@ proc init_large_mem_vars {} { } } -# Utility function to write big argument into redis client connection +# Utility function to write big argument into a server client connection proc write_big_bulk {size {prefix ""} {skip_read no}} { init_large_mem_vars diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 6d9ca6299a..340fbd65e0 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -1,4 +1,4 @@ -# Redis test suite. Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com +# Server test suite.
Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com # This software is released under the BSD License. See the COPYING file for # more information. @@ -105,13 +105,14 @@ set ::all_tests { unit/cluster/links unit/cluster/cluster-response-tls unit/cluster/failure-marking + unit/cluster/sharded-pubsub } # Index to the next test to run in the ::all_tests list. set ::next_test 0 set ::host 127.0.0.1 set ::port 6379; # port for external server -set ::baseport 21111; # initial port for spawned redis servers +set ::baseport 21111; # initial port for spawned servers set ::portcount 8000; # we don't wanna use more than 10000 to avoid collision with cluster bus ports set ::traceleaks 0 set ::valgrind 0 @@ -137,7 +138,7 @@ set ::accurate 0; # If true runs fuzz tests with more iterations set ::force_failure 0 set ::timeout 1200; # 20 minutes without progresses will quit the test. set ::last_progress [clock seconds] -set ::active_servers {} ; # Pids of active Redis instances. +set ::active_servers {} ; # Pids of active server instances. set ::dont_clean 0 set ::dont_pre_clean 0 set ::wait_server 0 @@ -153,7 +154,7 @@ set ::large_memory 0 set ::log_req_res 0 set ::force_resp3 0 -# Set to 1 when we are running in client mode. The Redis test uses a +# Set to 1 when we are running in client mode. The server test uses a # server-client model to run tests simultaneously. The server instance # runs the specified number of client instances that will actually run tests. # The server is responsible of showing the result to the user, and exit with @@ -217,7 +218,7 @@ proc r {args} { [srv $level "client"] {*}$args } -# Returns a Redis instance by index. +# Returns a server instance by index. proc Rn {n} { set level [expr -1*$n] return [srv $level "client"] @@ -255,7 +256,7 @@ proc reconnect {args} { lset ::servers end+$level $srv } -proc redis_deferring_client {args} { +proc valkey_deferring_client {args} { set level 0 if {[llength $args] > 0 && [string is integer [lindex $args 0]]} { set level [lindex $args 0] @@ -908,7 +909,7 @@ proc close_replication_stream {s} { return } -# With the parallel test running multiple Redis instances at the same time +# With the parallel test running multiple server instances at the same time # we need a fast enough computer, otherwise a lot of tests may generate # false positives. 
# If the computer is too slow we revert the sequential test without any diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index cdd12511e2..2edac213e5 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -108,7 +108,7 @@ start_server {tags {"acl external:skip"}} { } {*NOPERM*channel*} test {By default, only default user is able to subscribe to any channel} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd AUTH default pwd $rd read $rd SUBSCRIBE foo @@ -124,7 +124,7 @@ start_server {tags {"acl external:skip"}} { } {*NOPERM*channel*} test {By default, only default user is able to subscribe to any shard channel} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd AUTH default pwd $rd read $rd SSUBSCRIBE foo @@ -140,7 +140,7 @@ start_server {tags {"acl external:skip"}} { } {*NOPERM*channel*} test {By default, only default user is able to subscribe to any pattern} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd AUTH default pwd $rd read $rd PSUBSCRIBE bar* @@ -209,7 +209,7 @@ start_server {tags {"acl external:skip"}} { } test {It's possible to allow subscribing to a subset of channels} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd AUTH psuser pspass $rd read $rd SUBSCRIBE foo:1 @@ -222,7 +222,7 @@ start_server {tags {"acl external:skip"}} { } {*NOPERM*channel*} test {It's possible to allow subscribing to a subset of shard channels} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd AUTH psuser pspass $rd read $rd SSUBSCRIBE foo:1 @@ -235,7 +235,7 @@ start_server {tags {"acl external:skip"}} { } {*NOPERM*channel*} test {It's possible to allow subscribing to a subset of channel patterns} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd AUTH psuser pspass $rd read $rd PSUBSCRIBE foo:1 @@ -248,7 +248,7 @@ start_server {tags {"acl external:skip"}} { } {*NOPERM*channel*} test {Subscribers are killed when revoked of channel permission} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r ACL setuser psuser resetchannels &foo:1 $rd AUTH psuser pspass $rd read @@ -262,7 +262,7 @@ start_server {tags {"acl external:skip"}} { } {0} test {Subscribers are killed when revoked of channel permission} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r ACL setuser psuser resetchannels &foo:1 $rd AUTH psuser pspass $rd read @@ -276,7 +276,7 @@ start_server {tags {"acl external:skip"}} { } {0} test {Subscribers are killed when revoked of pattern permission} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r ACL setuser psuser resetchannels &bar:* $rd AUTH psuser pspass $rd read @@ -290,7 +290,7 @@ start_server {tags {"acl external:skip"}} { } {0} test {Subscribers are killed when revoked of allchannels permission} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r ACL setuser psuser allchannels $rd AUTH psuser pspass $rd read @@ -304,7 +304,7 @@ start_server {tags {"acl external:skip"}} { } {0} test {Subscribers are pardoned if literal permissions are retained and/or gaining allchannels} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r ACL setuser psuser resetchannels &foo:1 &bar:* &orders $rd AUTH psuser pspass $rd read @@ -326,7 +326,7 @@ start_server {tags {"acl external:skip"}} { test {blocked command gets rejected when reprocessed after permission change} { r auth default "" r config resetstat - set rd [redis_deferring_client] + set rd 
[valkey_deferring_client] r ACL setuser psuser reset on nopass +@all allkeys $rd AUTH psuser pspass $rd read @@ -526,7 +526,7 @@ start_server {tags {"acl external:skip"}} { } } - # Note that the order of the generated ACL rules is not stable in Redis + # Note that the order of the generated ACL rules is not stable in the server # so we need to match the different parts and not as a whole string. test {ACL GETUSER is able to translate back command permissions} { # Subtractive @@ -754,7 +754,7 @@ start_server {tags {"acl external:skip"}} { } test {ACL LOG can distinguish the transaction context (2)} { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] r ACL SETUSER antirez +incr r AUTH antirez foo @@ -830,7 +830,7 @@ start_server {tags {"acl external:skip"}} { test {When default user is off, new connections are not authenticated} { r ACL setuser default off - catch {set rd1 [redis_deferring_client]} e + catch {set rd1 [valkey_deferring_client]} e r ACL setuser default on set e } {*NOAUTH*} @@ -1024,8 +1024,8 @@ start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "allc reconnect r ACL SETUSER doug on nopass resetchannels &test* +@all ~* - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] $rd1 AUTH alice alice $rd1 read @@ -1055,8 +1055,8 @@ start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "allc reconnect r ACL SETUSER mortimer on >mortimer ~* &* +@all - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] $rd1 AUTH alice alice $rd1 read diff --git a/tests/unit/bitfield.tcl b/tests/unit/bitfield.tcl index 21091aa99c..2dfc77b48a 100644 --- a/tests/unit/bitfield.tcl +++ b/tests/unit/bitfield.tcl @@ -114,7 +114,7 @@ start_server {tags {"bitops"}} { } set max [expr {$min+$range-1}] - # Compare Tcl vs Redis + # Compare Tcl vs the server set range2 [expr {$range*2}] set value [expr {($min*2)+[randomInt $range2]}] set increment [expr {($min*2)+[randomInt $range2]}] @@ -166,7 +166,7 @@ start_server {tags {"bitops"}} { } set max [expr {$min+$range-1}] - # Compare Tcl vs Redis + # Compare Tcl vs the server set range2 [expr {$range*2}] set value [expr {($min*2)+[randomInt $range2]}] set increment [expr {($min*2)+[randomInt $range2]}] diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl index f50f65dfa0..edcafdee07 100644 --- a/tests/unit/bitops.tcl +++ b/tests/unit/bitops.tcl @@ -1,4 +1,4 @@ -# Compare Redis commands against Tcl implementations of the same commands. +# Compare server commands against Tcl implementations of the same commands. 
proc count_bits s { binary scan $s b* bits string length [regsub -all {0} $bits {}] diff --git a/tests/unit/client-eviction.tcl b/tests/unit/client-eviction.tcl index 1fc7c02ca9..d40dc88c3f 100644 --- a/tests/unit/client-eviction.tcl +++ b/tests/unit/client-eviction.tcl @@ -1,6 +1,6 @@ tags {"external:skip logreqres:skip"} { -# Get info about a redis client connection: +# Get info about a server client connection: # name - name of client we want to query # f - field name from "CLIENT LIST" we want to get proc client_field {name f} { @@ -27,7 +27,7 @@ proc gen_client {} { return [list $rr $name] } -# Sum a value across all redis client connections: +# Sum a value across all server client connections: # f - the field name from "CLIENT LIST" we want to sum proc clients_sum {f} { set sum 0 @@ -257,7 +257,7 @@ start_server {} { test "client evicted due to output buf" { r flushdb r setrange k 200000 v - set rr [redis_deferring_client] + set rr [valkey_deferring_client] $rr client setname test_client $rr flush assert {[$rr read] == "OK"} @@ -325,10 +325,10 @@ start_server {} { r setrange k $obuf_size v set rr1 [redis_client] $rr1 client setname "qbuf-client" - set rr2 [redis_deferring_client] + set rr2 [valkey_deferring_client] $rr2 client setname "obuf-client1" assert_equal [$rr2 read] OK - set rr3 [redis_deferring_client] + set rr3 [valkey_deferring_client] $rr3 client setname "obuf-client2" assert_equal [$rr3 read] OK diff --git a/tests/unit/cluster/cli.tcl b/tests/unit/cluster/cli.tcl index 734dd19c9f..948edc585d 100644 --- a/tests/unit/cluster/cli.tcl +++ b/tests/unit/cluster/cli.tcl @@ -1,4 +1,4 @@ -# Primitive tests on cluster-enabled redis using redis-cli +# Primitive tests on cluster-enabled server using valkey-cli source tests/support/cli.tcl @@ -17,7 +17,7 @@ start_multiple_servers 3 [list overrides $base_conf] { set node2 [srv -1 client] set node3 [srv -2 client] set node3_pid [srv -2 pid] - set node3_rd [redis_deferring_client -2] + set node3_rd [valkey_deferring_client -2] test {Create 3 node cluster} { exec src/valkey-cli --cluster-yes --cluster create \ @@ -79,7 +79,7 @@ start_multiple_servers 3 [list overrides $base_conf] { } } - set node1_rd [redis_deferring_client 0] + set node1_rd [valkey_deferring_client 0] test "use previous hostip in \"cluster-preferred-endpoint-type unknown-endpoint\" mode" { @@ -87,7 +87,7 @@ start_multiple_servers 3 [list overrides $base_conf] { set endpoint_type_before_set [lindex [split [$node1 CONFIG GET cluster-preferred-endpoint-type] " "] 1] $node1 CONFIG SET cluster-preferred-endpoint-type unknown-endpoint - # when redis-cli not in cluster mode, return MOVE with empty host + # when valkey-cli not in cluster mode, return MOVE with empty host set slot_for_foo [$node1 CLUSTER KEYSLOT foo] assert_error "*MOVED $slot_for_foo :*" {$node1 set foo bar} @@ -272,7 +272,7 @@ test {Migrate the last slot away from a node using valkey-cli} { set owner_r [redis $owner_host $owner_port 0 $::tls] set owner_id [$owner_r CLUSTER MYID] - # Move slot to new node using plain Redis commands + # Move slot to new node using plain commands assert_equal OK [$newnode_r CLUSTER SETSLOT $slot IMPORTING $owner_id] assert_equal OK [$owner_r CLUSTER SETSLOT $slot MIGRATING $newnode_id] assert_equal {foo} [$owner_r CLUSTER GETKEYSINSLOT $slot 10] @@ -295,7 +295,7 @@ test {Migrate the last slot away from a node using valkey-cli} { fail "Cluster doesn't stabilize" } - # Move the only slot back to original node using redis-cli + # Move the only slot back to original node using 
valkey-cli exec src/valkey-cli --cluster reshard 127.0.0.1:[srv -3 port] \ --cluster-from $newnode_id \ --cluster-to $owner_id \ diff --git a/tests/unit/cluster/hostnames.tcl b/tests/unit/cluster/hostnames.tcl index f318240626..f08c9cfa84 100644 --- a/tests/unit/cluster/hostnames.tcl +++ b/tests/unit/cluster/hostnames.tcl @@ -116,10 +116,11 @@ test "Verify the nodes configured with prefer hostname only show hostname for ne # Have everyone forget node 6 and isolate it from the cluster. isolate_node 6 - # Set hostnames for the masters, now that the node is isolated - R 0 config set cluster-announce-hostname "shard-1.com" - R 1 config set cluster-announce-hostname "shard-2.com" - R 2 config set cluster-announce-hostname "shard-3.com" + set primaries 3 + for {set j 0} {$j < $primaries} {incr j} { + # Set hostnames for the masters, now that the node is isolated + R $j config set cluster-announce-hostname "shard-$j.com" + } # Prevent Node 0 and Node 6 from properly meeting, # they'll hang in the handshake phase. This allows us to @@ -149,9 +150,17 @@ test "Verify the nodes configured with prefer hostname only show hostname for ne } else { fail "Node did not learn about the 2 shards it can talk to" } - set slot_result [R 6 CLUSTER SLOTS] - assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "shard-2.com" - assert_equal [lindex [get_slot_field $slot_result 1 2 3] 1] "shard-3.com" + wait_for_condition 50 100 { + [lindex [get_slot_field [R 6 CLUSTER SLOTS] 0 2 3] 1] eq "shard-1.com" + } else { + fail "hostname for shard-1 didn't reach node 6" + } + + wait_for_condition 50 100 { + [lindex [get_slot_field [R 6 CLUSTER SLOTS] 1 2 3] 1] eq "shard-2.com" + } else { + fail "hostname for shard-2 didn't reach node 6" + } # Also make sure we know about the isolated master, we # just can't reach it. @@ -170,10 +179,14 @@ test "Verify the nodes configured with prefer hostname only show hostname for ne } else { fail "Node did not learn about the 2 shards it can talk to" } - set slot_result [R 6 CLUSTER SLOTS] - assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "shard-1.com" - assert_equal [lindex [get_slot_field $slot_result 1 2 3] 1] "shard-2.com" - assert_equal [lindex [get_slot_field $slot_result 2 2 3] 1] "shard-3.com" + + for {set j 0} {$j < $primaries} {incr j} { + wait_for_condition 50 100 { + [lindex [get_slot_field [R 6 CLUSTER SLOTS] $j 2 3] 1] eq "shard-$j.com" + } else { + fail "hostname information for shard-$j didn't reach node 6" + } + } } test "Test restart will keep hostname information" { diff --git a/tests/unit/cluster/links.tcl b/tests/unit/cluster/links.tcl index a202c378bd..1f840fd960 100644 --- a/tests/unit/cluster/links.tcl +++ b/tests/unit/cluster/links.tcl @@ -80,13 +80,13 @@ start_cluster 1 2 {tags {external:skip cluster}} { set channelname ch3 # subscribe on replica1 - set subscribeclient1 [redis_deferring_client -1] + set subscribeclient1 [valkey_deferring_client -1] $subscribeclient1 deferred 1 $subscribeclient1 SSUBSCRIBE $channelname $subscribeclient1 read # subscribe on replica2 - set subscribeclient2 [redis_deferring_client -2] + set subscribeclient2 [valkey_deferring_client -2] $subscribeclient2 deferred 1 $subscribeclient2 SSUBSCRIBE $channelname $subscribeclient2 read @@ -191,7 +191,7 @@ start_cluster 3 0 {tags {external:skip cluster}} { # On primary1, set cluster link send buffer limit to 256KB, which is large enough to not be # overflowed by regular gossip messages but also small enough that it doesn't take too much - # memory to overflow it. 
If it is set too high, Redis may get OOM killed by kernel before this + # memory to overflow it. If it is set too high, the server may get OOM killed by kernel before this # limit is overflowed in some RAM-limited test environments. set oldlimit [lindex [$primary1 CONFIG get cluster-link-sendbuf-limit] 1] $primary1 CONFIG set cluster-link-sendbuf-limit [expr 256*1024] diff --git a/tests/unit/dump.tcl b/tests/unit/dump.tcl index dd75952903..9018270d67 100644 --- a/tests/unit/dump.tcl +++ b/tests/unit/dump.tcl @@ -285,7 +285,7 @@ start_server {tags {"dump"}} { assert {[$first exists key] == 1} assert {[$second exists key] == 0} - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd debug sleep 1.0 ; # Make second server unable to reply. set e {} catch {r -1 migrate $second_host $second_port key 9 500} e diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl index 08fa88a105..a2554c865d 100644 --- a/tests/unit/expire.tcl +++ b/tests/unit/expire.tcl @@ -187,7 +187,7 @@ start_server {tags {"expire"}} { r psetex key2 500 a r psetex key3 500 a assert_equal 3 [r dbsize] - # Redis expires random keys ten times every second so we are + # The server expires random keys ten times every second so we are # fairly sure that all the three keys should be evicted after # two seconds. wait_for_condition 20 100 { @@ -204,7 +204,7 @@ start_server {tags {"expire"}} { r psetex key2{t} 500 a r psetex key3{t} 500 a set size1 [r dbsize] - # Redis expires random keys ten times every second so we are + # The server expires random keys ten times every second so we are # fairly sure that all the three keys should be evicted after # one second. after 1000 diff --git a/tests/unit/functions.tcl b/tests/unit/functions.tcl index 58ac0c8888..4415b60e57 100644 --- a/tests/unit/functions.tcl +++ b/tests/unit/functions.tcl @@ -235,7 +235,7 @@ start_server {tags {"scripting"}} { } {x} test {FUNCTION - test function kill} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r config set busy-reply-threshold 10 r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}] $rd fcall test 0 @@ -249,7 +249,7 @@ start_server {tags {"scripting"}} { } test {FUNCTION - test script kill not working on function} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r config set busy-reply-threshold 10 r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}] $rd fcall test 0 @@ -264,7 +264,7 @@ start_server {tags {"scripting"}} { } test {FUNCTION - test function kill not working on eval} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r config set busy-reply-threshold 10 $rd eval {local a = 1 while true do a = a + 1 end} 0 after 200 diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index 6175329da0..19af863993 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -1,5 +1,5 @@ # Helper functions to simulate search-in-radius in the Tcl side in order to -# verify the Redis implementation with a fuzzy test. +# verify the server implementation with a fuzzy test. 
proc geo_degrad deg {expr {$deg*(atan(1)*8/360)}} proc geo_raddeg rad {expr {$rad/(atan(1)*8/360)}} @@ -632,7 +632,7 @@ start_server {tags {"geo"}} { continue } if {$mydist < [expr {$radius_km*1000}]} { - # This is a false positive for redis since given the + # This is a false positive for the server since given the # same points the higher precision calculation provided # by TCL shows the point within range incr rounding_errors diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl index ee437189fb..c1e24ceec6 100644 --- a/tests/unit/hyperloglog.tcl +++ b/tests/unit/hyperloglog.tcl @@ -167,7 +167,7 @@ start_server {tags {"hll"}} { } # Use the hyperloglog to check if it crashes - # Redis in some way. + # the server in some way. catch { r pfcount hll } diff --git a/tests/unit/info.tcl b/tests/unit/info.tcl index 6e2d381f57..b62ca86d77 100644 --- a/tests/unit/info.tcl +++ b/tests/unit/info.tcl @@ -60,7 +60,7 @@ start_server {tags {"info" "external:skip"}} { r config resetstat r CONFIG SET latency-tracking yes r CONFIG SET latency-tracking-info-percentiles "50.0 99.0 99.9" - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del list1{t} $rd blpop list1{t} 0 @@ -259,7 +259,7 @@ start_server {tags {"info" "external:skip"}} { test {errorstats: blocking commands} { r config resetstat - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set rd_id [$rd read] r del list1{t} @@ -394,8 +394,8 @@ start_server {tags {"info" "external:skip"}} { test {clients: pubsub clients} { set info [r info clients] assert_equal [getInfoProperty $info pubsub_clients] {0} - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] # basic count assert_equal {1} [ssubscribe $rd1 {chan1}] assert_equal {1} [subscribe $rd2 {chan2}] diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 1e6e38625c..9ec8bb07b4 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -44,9 +44,9 @@ start_server {tags {"introspection"}} { # 3 retries of increasing sleep_time, i.e. start with 2s, then go 4s, 8s. set sleep_time 2 for {set i 0} {$i < 3} {incr i} { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] r debug sleep $sleep_time - set rd2 [redis_deferring_client] + set rd2 [valkey_deferring_client] r acl setuser dummy on nopass +ping $rd1 auth dummy "" $rd1 read @@ -80,16 +80,16 @@ start_server {tags {"introspection"}} { test {CLIENT KILL SKIPME YES/NO will kill all clients} { # Kill all clients except `me` - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] set connected_clients [s connected_clients] assert {$connected_clients >= 3} set res [r client kill skipme yes] assert {$res == $connected_clients - 1} # Kill all clients, including `me` - set rd3 [redis_deferring_client] - set rd4 [redis_deferring_client] + set rd3 [valkey_deferring_client] + set rd4 [valkey_deferring_client] set connected_clients [s connected_clients] assert {$connected_clients == 3} set res [r client kill skipme no] @@ -162,7 +162,7 @@ start_server {tags {"introspection"}} { } {} {needs:save} test "CLIENT REPLY OFF/ON: disable all commands reply" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # These replies were silenced. 
$rd client reply off @@ -178,7 +178,7 @@ start_server {tags {"introspection"}} { } test "CLIENT REPLY SKIP: skip the next command reply" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # The first pong reply was silenced. $rd client reply skip @@ -191,7 +191,7 @@ start_server {tags {"introspection"}} { } test "CLIENT REPLY ON: unset SKIP flag" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client reply skip $rd client reply on @@ -204,7 +204,7 @@ start_server {tags {"introspection"}} { } test {MONITOR can log executed commands} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd monitor assert_match {*OK*} [$rd read] r set foo bar @@ -215,7 +215,7 @@ start_server {tags {"introspection"}} { } {*"set" "foo"*"get" "foo"*} test {MONITOR can log commands issued by the scripting engine} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd monitor $rd read ;# Discard the OK r eval {redis.call('set',KEYS[1],ARGV[1])} 1 foo bar @@ -228,7 +228,7 @@ start_server {tags {"introspection"}} { r function load replace {#!lua name=test redis.register_function('test', function() return redis.call('set', 'foo', 'bar') end) } - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd monitor $rd read ;# Discard the OK r fcall test 0 @@ -238,7 +238,7 @@ start_server {tags {"introspection"}} { } test {MONITOR supports redacting command arguments} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd monitor $rd read ; # Discard the OK @@ -267,7 +267,7 @@ start_server {tags {"introspection"}} { } {0} {needs:repl} test {MONITOR correctly handles multi-exec cases} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd monitor $rd read ; # Discard the OK @@ -296,8 +296,8 @@ start_server {tags {"introspection"}} { # need to reconnect in order to reset the clients state reconnect - set rd [redis_deferring_client] - set bc [redis_deferring_client] + set rd [valkey_deferring_client] + set bc [valkey_deferring_client] r del mylist $rd monitor @@ -363,7 +363,7 @@ start_server {tags {"introspection"}} { } {*name=someothername*} test {After CLIENT SETNAME, connection can still be closed} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client setname foobar assert_equal [$rd read] "OK" assert_match {*foobar*} [r client list] @@ -612,7 +612,7 @@ start_server {tags {"introspection"}} { } test {CONFIG SET rollback on apply error} { - # This test tries to configure a used port number in redis. This is expected + # This test tries to configure a used port number in the server. This is expected # to pass the `CONFIG SET` validity checking implementation but fail on # actual "apply" of the setting. This will validate that after an "apply" # failure we rollback to the previous values. @@ -645,7 +645,7 @@ start_server {tags {"introspection"}} { set used_port [find_available_port $::baseport $::portcount] dict set some_configs port $used_port - # Run a dummy server on used_port so we know we can't configure redis to + # Run a dummy server on used_port so we know we can't configure the server to # use it. 
It's ok for this to fail because that means used_port is invalid # anyway catch {socket -server dummy_accept -myaddr 127.0.0.1 $used_port} e diff --git a/tests/unit/limits.tcl b/tests/unit/limits.tcl index 3af151981e..a593eac7b9 100644 --- a/tests/unit/limits.tcl +++ b/tests/unit/limits.tcl @@ -9,7 +9,7 @@ start_server {tags {"limits network external:skip"} overrides {maxclients 10}} { catch { while {$c < 50} { incr c - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd ping $rd read after 100 diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl index 363dab4725..109707d1c6 100644 --- a/tests/unit/maxmemory.tcl +++ b/tests/unit/maxmemory.tcl @@ -56,7 +56,7 @@ start_server {tags {"maxmemory" "external:skip"}} { init_test $client_eviction for {set j 0} {$j < 20} {incr j} { - set rr [redis_deferring_client] + set rr [valkey_deferring_client] lappend clients $rr } @@ -85,7 +85,7 @@ start_server {tags {"maxmemory" "external:skip"}} { init_test $client_eviction for {set j 0} {$j < 30} {incr j} { - set rr [redis_deferring_client] + set rr [valkey_deferring_client] lappend clients $rr } @@ -272,7 +272,7 @@ start_server {tags {"maxmemory external:skip"}} { incr numkeys } # Now we add the same number of volatile keys already added. - # We expect Redis to evict only volatile keys in order to make + # We expect the server to evict only volatile keys in order to make # space. set err 0 for {set j 0} {$j < $numkeys} {incr j} { @@ -349,12 +349,12 @@ proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} } # put the slave to sleep - set rd_slave [redis_deferring_client] + set rd_slave [valkey_deferring_client] pause_process $slave_pid # send some 10mb worth of commands that don't increase the memory usage if {$pipeline == 1} { - set rd_master [redis_deferring_client -1] + set rd_master [valkey_deferring_client -1] for {set k 0} {$k < $cmd_count} {incr k} { $rd_master setrange key:0 0 [string repeat A $payload_len] } @@ -406,7 +406,7 @@ proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} # test that slave buffer are counted correctly # we wanna use many small commands, and we don't wanna wait long -# so we need to use a pipeline (redis_deferring_client) +# so we need to use a pipeline (valkey_deferring_client) # that may cause query buffer to fill and induce eviction, so we disable it test_slave_buffers {slave buffer are counted correctly} 1000000 10 0 1 @@ -435,7 +435,7 @@ start_server {tags {"maxmemory external:skip"}} { # Next writing command will trigger evicting some keys if last # command trigger DB dict rehash r set k2 v2 - # There must be 4098 keys because redis doesn't evict keys. + # There must be 4098 keys because the server doesn't evict keys. 
r dbsize } {4098} } @@ -450,7 +450,7 @@ start_server {tags {"maxmemory external:skip"}} { # 10 clients listening on tracking messages set clients {} for {set j 0} {$j < 10} {incr j} { - lappend clients [redis_deferring_client] + lappend clients [valkey_deferring_client] } foreach rd $clients { $rd HELLO 3 diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl index e6ae34e7df..525db407bf 100644 --- a/tests/unit/memefficiency.tcl +++ b/tests/unit/memefficiency.tcl @@ -1,6 +1,6 @@ proc test_memory_efficiency {range} { r flushall - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set base_mem [s used_memory] set written 0 for {set j 0} {$j < 10000} {incr j} { @@ -193,7 +193,7 @@ run_solo {defrag} { # Populate memory with interleaving script-key pattern of same size set dummy_script "--[string repeat x 400]\nreturn " - set rd [redis_deferring_client] + set rd [valkey_deferring_client] for {set j 0} {$j < $n} {incr j} { set val "$dummy_script[format "%06d" $j]" $rd script load $val @@ -286,7 +286,7 @@ run_solo {defrag} { r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream > # create big keys with 10k items - set rd [redis_deferring_client] + set rd [valkey_deferring_client] for {set j 0} {$j < 10000} {incr j} { $rd hset bighash $j [concat "asdfasdfasdf" $j] $rd lpush biglist [concat "asdfasdfasdf" $j] @@ -418,8 +418,8 @@ run_solo {defrag} { # Populate memory with interleaving pubsub-key pattern of same size set n 50000 set dummy_channel "[string repeat x 400]" - set rd [redis_deferring_client] - set rd_pubsub [redis_deferring_client] + set rd [valkey_deferring_client] + set rd_pubsub [valkey_deferring_client] for {set j 0} {$j < $n} {incr j} { set channel_name "$dummy_channel[format "%06d" $j]" $rd_pubsub subscribe $channel_name @@ -518,7 +518,7 @@ run_solo {defrag} { r config set list-max-ziplist-size 5 ;# list of 500k items will have 100k quicklist nodes # create big keys with 10k items - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set expected_frag 1.7 # add a mass of list nodes to two lists (allocations are interlaced) @@ -637,7 +637,7 @@ run_solo {defrag} { } # add a mass of keys with 600 bytes values, fill the bin of 640 bytes which has 32 regs per slab. 
- set rd [redis_deferring_client] + set rd [valkey_deferring_client] set keys 640000 for {set j 0} {$j < $keys} {incr j} { $rd setrange $j 600 x diff --git a/tests/unit/moduleapi/async_rm_call.tcl b/tests/unit/moduleapi/async_rm_call.tcl index 1bf12de237..e88e8cf356 100644 --- a/tests/unit/moduleapi/async_rm_call.tcl +++ b/tests/unit/moduleapi/async_rm_call.tcl @@ -16,7 +16,7 @@ start_server {tags {"modules"}} { } test "Blpop on threaded async RM_Call" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd do_rm_call_async_on_thread blpop l 0 wait_for_blocked_clients_count 1 @@ -29,7 +29,7 @@ start_server {tags {"modules"}} { foreach cmd {do_rm_call_async do_rm_call_async_script_mode } { test "Blpop on async RM_Call using $cmd" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd $cmd blpop l 0 wait_for_blocked_clients_count 1 @@ -40,7 +40,7 @@ start_server {tags {"modules"}} { } test "Brpop on async RM_Call using $cmd" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd $cmd brpop l 0 wait_for_blocked_clients_count 1 @@ -51,7 +51,7 @@ start_server {tags {"modules"}} { } test "Brpoplpush on async RM_Call using $cmd" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd $cmd brpoplpush l1 l2 0 wait_for_blocked_clients_count 1 @@ -63,7 +63,7 @@ start_server {tags {"modules"}} { } {a} test "Blmove on async RM_Call using $cmd" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd $cmd blmove l1 l2 LEFT LEFT 0 wait_for_blocked_clients_count 1 @@ -75,7 +75,7 @@ start_server {tags {"modules"}} { } {a} test "Bzpopmin on async RM_Call using $cmd" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd $cmd bzpopmin s 0 wait_for_blocked_clients_count 1 @@ -86,7 +86,7 @@ start_server {tags {"modules"}} { } test "Bzpopmax on async RM_Call using $cmd" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd $cmd bzpopmax s 0 wait_for_blocked_clients_count 1 @@ -98,7 +98,7 @@ start_server {tags {"modules"}} { } test {Nested async RM_Call} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0 wait_for_blocked_clients_count 1 @@ -109,8 +109,8 @@ start_server {tags {"modules"}} { } test {Test multiple async RM_Call waiting on the same event} { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] $rd1 do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0 $rd2 do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0 @@ -136,7 +136,7 @@ start_server {tags {"modules"}} { } test {async RM_Call inside async RM_Call callback} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd wait_and_do_rm_call blpop l 0 wait_for_blocked_clients_count 1 @@ -161,11 +161,11 @@ start_server {tags {"modules"}} { test {Become replica while having async RM_Call running} { r flushall - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd do_rm_call_async blpop l 0 wait_for_blocked_clients_count 1 - #become a replica of a not existing redis + #become a replica of a not existing server r replicaof localhost 30000 catch {[$rd read]} e @@ -182,7 +182,7 @@ start_server {tags {"modules"}} { test {Pipeline with blocking RM_Call} { r flushall - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set buf "" append buf 
"do_rm_call_async blpop l 0\r\n" append buf "ping\r\n" @@ -202,7 +202,7 @@ start_server {tags {"modules"}} { test {blocking RM_Call abort} { r flushall - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set client_id [$rd read] @@ -229,7 +229,7 @@ start_server {tags {"modules"}} { r flushall set repl [attach_to_replication_stream] - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd do_rm_call_async blpop l 0 wait_for_blocked_clients_count 1 @@ -251,7 +251,7 @@ start_server {tags {"modules"}} { r flushall set repl [attach_to_replication_stream] - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blpop_and_set_multiple_keys l x 1 y 2 wait_for_blocked_clients_count 1 @@ -277,7 +277,7 @@ start_server {tags {"modules"}} { r flushall set repl [attach_to_replication_stream] - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd do_rm_call_async_no_replicate blpop l 0 wait_for_blocked_clients_count 1 @@ -307,7 +307,7 @@ start_server {tags {"modules"}} { r flushall set repl [attach_to_replication_stream] - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blpop_and_set_multiple_keys l string_foo 1 string_bar 2 wait_for_blocked_clients_count 1 @@ -346,7 +346,7 @@ start_server {tags {"modules"}} { r DEBUG SET-ACTIVE-EXPIRE 0 set repl [attach_to_replication_stream] - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blpop_and_set_multiple_keys l string_foo 1 string_bar 2 wait_for_blocked_clients_count 1 @@ -421,7 +421,7 @@ start_server {tags {"modules"}} { r module load $testmodule3 test {Test unblock handler on module blocked on keys} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r fsl.push l 1 $rd do_rm_call_async FSL.BPOPGT l 3 0 diff --git a/tests/unit/moduleapi/auth.tcl b/tests/unit/moduleapi/auth.tcl index c7c2def779..aae57535e3 100644 --- a/tests/unit/moduleapi/auth.tcl +++ b/tests/unit/moduleapi/auth.tcl @@ -21,7 +21,7 @@ start_server {tags {"modules"}} { assert_equal [r auth.changecount] 0 r auth.createmoduleuser - # Catch the I/O exception that was thrown when Redis + # Catch the I/O exception that was thrown when the server # disconnected with us. 
catch { [r ping] } e assert_match {*I/O*} $e diff --git a/tests/unit/moduleapi/blockedclient.tcl b/tests/unit/moduleapi/blockedclient.tcl index 22b2c4bae5..d94ef5c5ba 100644 --- a/tests/unit/moduleapi/blockedclient.tcl +++ b/tests/unit/moduleapi/blockedclient.tcl @@ -114,7 +114,7 @@ foreach call_type {nested normal} { set busy_time_limit 50 set old_time_limit [lindex [r config get busy-reply-threshold] 1] r config set busy-reply-threshold $busy_time_limit - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # run command that blocks until released set start [clock clicks -milliseconds] @@ -151,7 +151,7 @@ foreach call_type {nested normal} { $rd slow_fg_command 200000 } $rd flush - after 10 ;# try to make sure redis started running the command before we proceed + after 10 ;# try to make sure the server started running the command before we proceed # make sure we didn't get BUSY error, it simply blocked till the command was done r ping @@ -171,7 +171,7 @@ foreach call_type {nested normal} { # trigger slow operation r set_slow_bg_operation 1 r hset hash foo bar - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set start [clock clicks -milliseconds] $rd do_bg_rm_call hgetall hash @@ -284,7 +284,7 @@ foreach call_type {nested normal} { test {block time is shorter than timer period} { # This command does not have the reply. - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd unblock_by_timer 100 10 # Wait for the client to unlock. after 120 diff --git a/tests/unit/moduleapi/blockonbackground.tcl b/tests/unit/moduleapi/blockonbackground.tcl index fcd7f1dd44..2d0296357a 100644 --- a/tests/unit/moduleapi/blockonbackground.tcl +++ b/tests/unit/moduleapi/blockonbackground.tcl @@ -96,7 +96,7 @@ start_server {tags {"modules"}} { } test "client unblock works only for modules with timeout support" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set id [$rd read] diff --git a/tests/unit/moduleapi/blockonkeys.tcl b/tests/unit/moduleapi/blockonkeys.tcl index 66a94dcd7c..9014e930cf 100644 --- a/tests/unit/moduleapi/blockonkeys.tcl +++ b/tests/unit/moduleapi/blockonkeys.tcl @@ -4,8 +4,8 @@ start_server {tags {"modules"}} { r module load $testmodule test "Module client blocked on keys: Circular BPOPPUSH" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] r del src dst @@ -23,7 +23,7 @@ start_server {tags {"modules"}} { } test "Module client blocked on keys: Self-referential BPOPPUSH" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] r del src @@ -35,7 +35,7 @@ start_server {tags {"modules"}} { } test "Module client blocked on keys: BPOPPUSH unblocked by timer" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] r del src dst @@ -68,14 +68,14 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (no metadata): Timeout} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpop k 1 assert_equal {Request timedout} [$rd read] } test {Module client blocked on keys (no metadata): Blocked} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpop k 0 wait_for_blocked_clients_count 1 r fsl.push k 34 @@ -90,7 +90,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (with metadata): Timeout} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set 
cid [$rd read] r fsl.push k 33 @@ -101,7 +101,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (with metadata): Blocked, case 1} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set cid [$rd read] r fsl.push k 33 @@ -115,7 +115,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (with metadata): Blocked, case 2} { r del k r fsl.push k 32 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpopgt k 35 0 wait_for_blocked_clients_count 1 r fsl.push k 33 @@ -128,7 +128,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (with metadata): Blocked, DEL} { r del k r fsl.push k 32 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpopgt k 35 0 wait_for_blocked_clients_count 1 r del k @@ -138,7 +138,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (with metadata): Blocked, FLUSHALL} { r del k r fsl.push k 32 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpopgt k 35 0 wait_for_blocked_clients_count 1 r flushall @@ -149,7 +149,7 @@ start_server {tags {"modules"}} { r select 9 r del k r fsl.push k 32 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpopgt k 35 0 wait_for_blocked_clients_count 1 r swapdb 0 9 @@ -164,7 +164,7 @@ start_server {tags {"modules"}} { r select 0 r lpush k 38 r select 9 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpopgt k 35 0 wait_for_blocked_clients_count 1 r swapdb 0 9 @@ -180,7 +180,7 @@ start_server {tags {"modules"}} { r select 0 r fsl.push k 34 r select 9 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpopgt k 35 0 wait_for_blocked_clients_count 1 r swapdb 0 9 @@ -198,7 +198,7 @@ start_server {tags {"modules"}} { r select 0 r fsl.push k 38 r select 9 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpopgt k 35 0 wait_for_blocked_clients_count 1 r swapdb 0 9 @@ -209,7 +209,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (with metadata): Blocked, CLIENT KILL} { r del k r fsl.push k 32 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set cid [$rd read] $rd fsl.bpopgt k 35 0 @@ -220,7 +220,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (with metadata): Blocked, CLIENT UNBLOCK TIMEOUT} { r del k r fsl.push k 32 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set cid [$rd read] $rd fsl.bpopgt k 35 0 @@ -232,7 +232,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys (with metadata): Blocked, CLIENT UNBLOCK ERROR} { r del k r fsl.push k 32 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set cid [$rd read] $rd fsl.bpopgt k 35 0 @@ -243,7 +243,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys, no timeout CB, CLIENT UNBLOCK TIMEOUT} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set cid [$rd read] $rd fsl.bpop k 0 NO_TO_CB @@ -254,7 +254,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys, no timeout CB, CLIENT UNBLOCK ERROR} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set cid [$rd read] $rd fsl.bpop k 0 NO_TO_CB @@ -265,7 +265,7 @@ start_server {tags {"modules"}} { test {Module client re-blocked on keys after woke up on 
wrong type} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd fsl.bpop k 0 wait_for_blocked_clients_count 1 r lpush k 12 @@ -279,7 +279,7 @@ start_server {tags {"modules"}} { test {Module client blocked on keys woken up by LPUSH} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blockonkeys.popall k wait_for_blocked_clients_count 1 r lpush k 42 squirrel banana @@ -289,7 +289,7 @@ start_server {tags {"modules"}} { test {Module client unblocks BLPOP} { r del k - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blpop k 3 wait_for_blocked_clients_count 1 r blockonkeys.lpush k 42 @@ -301,7 +301,7 @@ start_server {tags {"modules"}} { r del k r lpush k aa # Module client blocks to pop 5 elements from list - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blockonkeys.blpopn k 5 wait_for_blocked_clients_count 1 # Check that RM_SignalKeyAsReady() can wake up BLPOPN @@ -316,7 +316,7 @@ start_server {tags {"modules"}} { r del k r set somekey someval # Module client blocks to pop 5 elements from list - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blockonkeys.blpopn_or_unblock k 5 0 wait_for_blocked_clients_count 1 # will now cause the module to trigger pop but instead will unblock the client from the reply_callback @@ -342,7 +342,7 @@ start_server {tags {"modules"}} { wait_for_sync $replica test {WAIT command on module blocked client on keys} { - set rd [redis_deferring_client -1] + set rd [valkey_deferring_client -1] $rd set x y $rd read diff --git a/tests/unit/moduleapi/cluster.tcl b/tests/unit/moduleapi/cluster.tcl index 4c0a49d48d..06878c1a01 100644 --- a/tests/unit/moduleapi/cluster.tcl +++ b/tests/unit/moduleapi/cluster.tcl @@ -1,4 +1,4 @@ -# Primitive tests on cluster-enabled redis with modules +# Primitive tests on cluster-enabled server with modules source tests/support/cli.tcl @@ -19,7 +19,7 @@ start_cluster 3 0 [list config_lines $modules] { test "Run blocking command (blocked on key) on cluster node3" { # key9184688 is mapped to slot 10923 (first slot of node 3) - set node3_rd [redis_deferring_client -2] + set node3_rd [valkey_deferring_client -2] $node3_rd fsl.bpop key9184688 0 $node3_rd flush wait_for_condition 50 100 { @@ -30,7 +30,7 @@ start_cluster 3 0 [list config_lines $modules] { } test "Run blocking command (no keys) on cluster node2" { - set node2_rd [redis_deferring_client -1] + set node2_rd [valkey_deferring_client -1] $node2_rd block.block 0 $node2_rd flush @@ -83,7 +83,7 @@ start_cluster 3 0 [list config_lines $modules] { test "Sanity test push cmd after resharding" { assert_error {*MOVED*} {$node3 fsl.push key9184688 1} - set node1_rd [redis_deferring_client 0] + set node1_rd [valkey_deferring_client 0] $node1_rd fsl.bpop key9184688 0 $node1_rd flush @@ -106,7 +106,7 @@ start_cluster 3 0 [list config_lines $modules] { test "Run blocking command (blocked on key) again on cluster node1" { $node1 del key9184688 # key9184688 is mapped to slot 10923 which has been moved to node1 - set node1_rd [redis_deferring_client 0] + set node1_rd [valkey_deferring_client 0] $node1_rd fsl.bpop key9184688 0 $node1_rd flush @@ -118,7 +118,7 @@ start_cluster 3 0 [list config_lines $modules] { } test "Run blocking command (no keys) again on cluster node2" { - set node2_rd [redis_deferring_client -1] + set node2_rd [valkey_deferring_client -1] $node2_rd block.block 0 $node2_rd flush diff --git a/tests/unit/moduleapi/commandfilter.tcl 
b/tests/unit/moduleapi/commandfilter.tcl index 72b16ec978..52bd6f1cb0 100644 --- a/tests/unit/moduleapi/commandfilter.tcl +++ b/tests/unit/moduleapi/commandfilter.tcl @@ -127,7 +127,7 @@ test {Blocking Commands don't run through command filter when reprocessed} { r lpush list2{t} a b c d e - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # we're asking to pop from the left, but the command filter swaps the two arguments, # if it didn't swap it, we would end up with e d c b a 5 (5 being the left most of the following lpush) # but since we swap the arguments, we end up with 1 e d c b a (1 being the right most of it). diff --git a/tests/unit/moduleapi/datatype.tcl b/tests/unit/moduleapi/datatype.tcl index 951c060e7f..d83fd00da8 100644 --- a/tests/unit/moduleapi/datatype.tcl +++ b/tests/unit/moduleapi/datatype.tcl @@ -64,7 +64,7 @@ start_server {tags {"modules"}} { r config set busy-reply-threshold 5000 ;# make sure we're using a high default # trigger slow loading r datatype.slow_loading 1 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set start [clock clicks -milliseconds] $rd debug reload diff --git a/tests/unit/moduleapi/defrag.tcl b/tests/unit/moduleapi/defrag.tcl index b2e23967ec..e169f8de9b 100644 --- a/tests/unit/moduleapi/defrag.tcl +++ b/tests/unit/moduleapi/defrag.tcl @@ -7,7 +7,7 @@ start_server {tags {"modules"} overrides {{save ""}}} { r config set active-defrag-threshold-lower 0 r config set active-defrag-cycle-min 99 - # try to enable active defrag, it will fail if redis was compiled without it + # try to enable active defrag, it will fail if the server was compiled without it catch {r config set activedefrag yes} e if {[r config get activedefrag] eq "activedefrag yes"} { diff --git a/tests/unit/moduleapi/hooks.tcl b/tests/unit/moduleapi/hooks.tcl index 94b0f6f312..c07f29c846 100644 --- a/tests/unit/moduleapi/hooks.tcl +++ b/tests/unit/moduleapi/hooks.tcl @@ -8,7 +8,7 @@ tags "modules" { test {Test clients connection / disconnection hooks} { for {set j 0} {$j < 2} {incr j} { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] $rd1 close } assert {[r hooks.event_count client-connected] > 1} @@ -16,7 +16,7 @@ tags "modules" { } test {Test module client change event for blocked client} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # select db other than 0 $rd select 1 # block on key diff --git a/tests/unit/moduleapi/keyspace_events.tcl b/tests/unit/moduleapi/keyspace_events.tcl index 1323b12966..9c1cfa8ba4 100644 --- a/tests/unit/moduleapi/keyspace_events.tcl +++ b/tests/unit/moduleapi/keyspace_events.tcl @@ -76,7 +76,7 @@ tags "modules" { test "Keyspace notifications: module events test" { r config set notify-keyspace-events Kd r del x - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r keyspace.notify x assert_equal {pmessage * __keyspace@9__:x notify} [$rd1 read] diff --git a/tests/unit/moduleapi/misc.tcl b/tests/unit/moduleapi/misc.tcl index da2ca8489c..7bee0ea2b5 100644 --- a/tests/unit/moduleapi/misc.tcl +++ b/tests/unit/moduleapi/misc.tcl @@ -549,7 +549,7 @@ if {[string match {*jemalloc*} [s mem_allocator]]} { test {test RM_Call with large arg for SET command} { # set a big value to trigger increasing the query buf r set foo [string repeat A 100000] - # set a smaller value but > PROTO_MBULK_BIG_ARG (32*1024) Redis will try to save the query buf itself on the DB. 
+ # set a smaller value but > PROTO_MBULK_BIG_ARG (32*1024) the server will try to save the query buf itself on the DB. r test.call_generic set bar [string repeat A 33000] # asset the value was trimmed assert {[r memory usage bar] < 42000}; # 42K to count for Jemalloc's additional memory overhead. diff --git a/tests/unit/moduleapi/moduleauth.tcl b/tests/unit/moduleapi/moduleauth.tcl index 82f42f5d1e..d7399d0ff1 100644 --- a/tests/unit/moduleapi/moduleauth.tcl +++ b/tests/unit/moduleapi/moduleauth.tcl @@ -247,8 +247,8 @@ start_server {tags {"modules"}} { test {module auth during blocking module auth} { r config resetstat r acl setuser foo >pwd on ~* &* +@all - set rd [redis_deferring_client] - set rd_two [redis_deferring_client] + set rd [valkey_deferring_client] + set rd_two [valkey_deferring_client] # Attempt blocking module auth. While this ongoing, attempt non blocking module auth from # moduleone/moduletwo and start another blocking module auth from another deferring client. @@ -289,9 +289,9 @@ start_server {tags {"modules"}} { test {Disabling Redis User during blocking module auth} { r config resetstat r acl setuser foo >pwd on ~* &* +@all - set rd [redis_deferring_client] + set rd [valkey_deferring_client] - # Attempt blocking module auth and disable the Redis user while module auth is in progress. + # Attempt blocking module auth and disable the user while module auth is in progress. $rd AUTH foo pwd wait_for_blocked_clients_count 1 r acl setuser foo >pwd off ~* &* +@all @@ -306,7 +306,7 @@ start_server {tags {"modules"}} { test {Killing a client in the middle of blocking module auth} { r config resetstat r acl setuser foo >pwd on ~* &* +@all - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set cid [$rd read] @@ -337,10 +337,10 @@ start_server {tags {"modules"}} { test {test RM_RegisterAuthCallback Module API during blocking module auth} { r config resetstat r acl setuser foo >defaultpwd on ~* &* +@all - set rd [redis_deferring_client] + set rd [valkey_deferring_client] - # Start the module auth attempt with the standard Redis auth password for the user. This - # will result in all module auth cbs attempted and then standard Redis auth will be tried. + # Start the module auth attempt with the standard auth password for the user. This + # will result in all module auth cbs attempted and then standard auth will be tried. $rd AUTH foo defaultpwd wait_for_blocked_clients_count 1 @@ -365,7 +365,7 @@ start_server {tags {"modules"}} { test {Module unload during blocking module auth} { r config resetstat r module load $miscmodule - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r acl setuser foo >pwd on ~* &* +@all # Start a blocking module auth attempt. 
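For reference, the blocking-client idiom behind most of the call sites renamed in this patch is roughly the sketch below; the helper names (valkey_deferring_client, wait_for_blocked_clients_count, assert_equal, r) are assumed to come from the test suite's support code, exactly as used in the surrounding tests:

    # Open a deferring client; its replies are read explicitly with [$rd read].
    set rd [valkey_deferring_client]
    r del mylist
    # Issue a blocking command; the reply stays pending until data arrives.
    $rd blpop mylist 0
    # Wait until the server reports one blocked client before unblocking it.
    wait_for_blocked_clients_count 1
    r lpush mylist hello
    # BLPOP replies with the key name and the popped element.
    assert_equal {mylist hello} [$rd read]
    $rd close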
diff --git a/tests/unit/moduleapi/publish.tcl b/tests/unit/moduleapi/publish.tcl index a6304ea528..f0d67fd051 100644 --- a/tests/unit/moduleapi/publish.tcl +++ b/tests/unit/moduleapi/publish.tcl @@ -4,8 +4,8 @@ start_server {tags {"modules"}} { r module load $testmodule test {PUBLISH and SPUBLISH via a module} { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] assert_equal {1} [ssubscribe $rd1 {chan1}] assert_equal {1} [subscribe $rd2 {chan1}] diff --git a/tests/unit/moduleapi/rdbloadsave.tcl b/tests/unit/moduleapi/rdbloadsave.tcl index 9319c93854..37841aa9aa 100644 --- a/tests/unit/moduleapi/rdbloadsave.tcl +++ b/tests/unit/moduleapi/rdbloadsave.tcl @@ -32,12 +32,12 @@ start_server {tags {"modules"}} { assert_equal [r dbsize] 0 # Send commands with pipeline. First command will call RM_RdbLoad() in - # the command callback. While loading RDB, Redis can go to networking to + # the command callback. While loading RDB, the server can go to networking to # reply -LOADING. By sending commands in pipeline, we verify it doesn't # cause a problem. - # e.g. Redis won't try to process next message of the current client + # e.g. the server won't try to process next message of the current client # while it is in the command callback for that client . - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] $rd1 test.rdbload blabla.rdb wait_for_condition 50 100 { diff --git a/tests/unit/moduleapi/stream.tcl b/tests/unit/moduleapi/stream.tcl index 7ad1a30598..92c058b51d 100644 --- a/tests/unit/moduleapi/stream.tcl +++ b/tests/unit/moduleapi/stream.tcl @@ -30,7 +30,7 @@ start_server {tags {"modules"}} { r del mystream # Blocking XREAD on an empty key - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] $rd1 XREAD BLOCK 3000 STREAMS mystream $ # wait until client is actually blocked wait_for_condition 50 100 { @@ -42,7 +42,7 @@ start_server {tags {"modules"}} { assert_equal "{mystream {{$id {field 1 value a}}}}" [$rd1 read] # Blocking XREAD on an existing stream - set rd2 [redis_deferring_client] + set rd2 [valkey_deferring_client] $rd2 XREAD BLOCK 3000 STREAMS mystream $ # wait until client is actually blocked wait_for_condition 50 100 { diff --git a/tests/unit/moduleapi/test_lazyfree.tcl b/tests/unit/moduleapi/test_lazyfree.tcl index 8d2c55abcf..9e1e4680af 100644 --- a/tests/unit/moduleapi/test_lazyfree.tcl +++ b/tests/unit/moduleapi/test_lazyfree.tcl @@ -5,7 +5,7 @@ start_server {tags {"modules"}} { test "modules allocated memory can be reclaimed in the background" { set orig_mem [s used_memory] - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # LAZYFREE_THRESHOLD is 64 for {set i 0} {$i < 10000} {incr i} { diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl index 85d20ddf38..16915fef3b 100644 --- a/tests/unit/multi.tcl +++ b/tests/unit/multi.tcl @@ -68,7 +68,7 @@ start_server {tags {"multi"}} { } {0 0} test {EXEC fails if there are errors while queueing commands #2} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del foo1{t} foo2{t} r multi r set foo1{t} bar1 @@ -523,7 +523,7 @@ start_server {tags {"multi"}} { } {OK} {needs:repl cluster:skip} test {DISCARD should not fail during OOM} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd config set maxmemory 1 assert {[$rd read] eq {OK}} r multi @@ -539,7 +539,7 @@ start_server {tags {"multi"}} { test {MULTI and script timeout} { # check that if MULTI arrives 
during timeout, it is either refused, or # allowed to pass, and we don't end up executing half of the transaction - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 @@ -564,7 +564,7 @@ start_server {tags {"multi"}} { test {EXEC and script timeout} { # check that if EXEC arrives during timeout, we don't end up executing # half of the transaction, and also that we exit the multi state - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 @@ -589,7 +589,7 @@ start_server {tags {"multi"}} { test {MULTI-EXEC body and script timeout} { # check that we don't run an incomplete transaction due to some commands # arriving during busy script - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 @@ -614,7 +614,7 @@ start_server {tags {"multi"}} { test {just EXEC and script timeout} { # check that if EXEC arrives during timeout, we don't end up executing # actual commands during busy script, and also that we exit the multi state - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] set r2 [redis_client] r config set lua-time-limit 10 r set xx 1 @@ -883,7 +883,7 @@ start_server {tags {"multi"}} { r set foo bar r config set maxmemory bla - # letting the redis parser read it, it'll throw an exception instead of + # letting the server parser read it, it'll throw an exception instead of # reply with an array that contains an error, so we switch to reading # raw RESP instead r readraw 1 diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl index 45efc26b45..b0fd184afe 100644 --- a/tests/unit/obuf-limits.tcl +++ b/tests/unit/obuf-limits.tcl @@ -29,7 +29,7 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} { test {Client output buffer hard limit is enforced} { r config set client-output-buffer-limit {pubsub 100000 0 0} - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] $rd1 subscribe foo set reply [$rd1 read] @@ -58,7 +58,7 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} { test $test_name { r config set client-output-buffer-limit "pubsub 0 100000 $soft_limit_time" set soft_limit_time [expr $soft_limit_time*1000] - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] $rd1 client setname test_client set reply [$rd1 read] @@ -124,14 +124,14 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} { } set orig_mem [s used_memory] # Set client name and get all items - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client setname mybiglist assert {[$rd read] eq "OK"} $rd lrange mylist 0 -1 $rd flush after 100 - # Before we read reply, redis will close this client. + # Before we read reply, the server will close this client. set clients [r client list] assert_no_match "*name=mybiglist*" $clients set cur_mem [s used_memory] @@ -143,18 +143,18 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} { assert_equal {} [$rd rawread] } - # Note: This test assumes that what's written with one write, will be read by redis in one read. + # Note: This test assumes that what's written with one write, will be read by the server in one read. 
# this assumption is wrong, but seem to work empirically (for now) test {No response for multi commands in pipeline if client output buffer limit is enforced} { r config set client-output-buffer-limit {normal 100000 0 0} set value [string repeat "x" 10000] r set bigkey $value - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] $rd2 client setname multicommands assert_equal "OK" [$rd2 read] - # Let redis sleep 1s firstly + # Let the server sleep 1s firstly $rd1 debug sleep 1 $rd1 flush after 100 @@ -162,7 +162,7 @@ start_server {tags {"obuf-limits external:skip logreqres:skip"}} { # Create a pipeline of commands that will be processed in one socket read. # It is important to use one write, in TLS mode independent writes seem # to wait for response from the server. - # Total size should be less than OS socket buffer, redis can + # Total size should be less than OS socket buffer, the server can # execute all commands in this pipeline when it wakes up. set buf "" for {set i 0} {$i < 15} {incr i} { diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl index 422fc76d6a..31e4e563b9 100644 --- a/tests/unit/other.tcl +++ b/tests/unit/other.tcl @@ -308,7 +308,7 @@ start_server {tags {"other"}} { } {} {needs:reset} test {RESET clears MONITOR state} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd monitor assert_equal [$rd read] "OK" diff --git a/tests/unit/pause.tcl b/tests/unit/pause.tcl index e30f922e67..d27f5775c4 100644 --- a/tests/unit/pause.tcl +++ b/tests/unit/pause.tcl @@ -1,7 +1,7 @@ start_server {tags {"pause network"}} { test "Test read commands are not blocked by client pause" { r client PAUSE 100000 WRITE - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd GET FOO $rd PING $rd INFO @@ -24,7 +24,7 @@ start_server {tags {"pause network"}} { # paused only WRITE. This is because the first 'PAUSE ALL' command is # more restrictive than the second 'PAUSE WRITE' and pause-client feature # preserve most restrictive configuration among multiple settings. - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd SET FOO BAR set test_start_time [clock milliseconds] @@ -40,7 +40,7 @@ start_server {tags {"pause network"}} { r client PAUSE 60000 WRITE r client PAUSE 10 WRITE after 100 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd SET FOO BAR wait_for_blocked_clients_count 1 100 10 @@ -52,7 +52,7 @@ start_server {tags {"pause network"}} { test "Test write commands are paused by RO" { r client PAUSE 60000 WRITE - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd SET FOO BAR wait_for_blocked_clients_count 1 50 100 @@ -66,13 +66,13 @@ start_server {tags {"pause network"}} { r client PAUSE 100000 WRITE # Test that pfcount, which can replicate, is also blocked - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd PFCOUNT pause-hll wait_for_blocked_clients_count 1 50 100 # Test that publish, which adds the message to the replication # stream is blocked. 
- set rd2 [redis_deferring_client] + set rd2 [valkey_deferring_client] $rd2 publish foo bar wait_for_blocked_clients_count 2 50 100 @@ -97,7 +97,7 @@ start_server {tags {"pause network"}} { } test "Test write multi-execs are blocked by pause RO" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd MULTI assert_equal [$rd read] "OK" $rd SET FOO BAR @@ -112,8 +112,8 @@ start_server {tags {"pause network"}} { test "Test scripts are blocked by pause RO" { r client PAUSE 60000 WRITE - set rd [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd [valkey_deferring_client] + set rd2 [valkey_deferring_client] $rd EVAL "return 1" 0 # test a script with a shebang and no flags for coverage @@ -194,8 +194,8 @@ start_server {tags {"pause network"}} { } test "Test write scripts in multi-exec are blocked by pause RO" { - set rd [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd [valkey_deferring_client] + set rd2 [valkey_deferring_client] # one with a shebang $rd MULTI @@ -240,7 +240,7 @@ start_server {tags {"pause network"}} { test "Test multiple clients can be queued up and unblocked" { r client PAUSE 60000 WRITE - set clients [list [redis_deferring_client] [redis_deferring_client] [redis_deferring_client]] + set clients [list [valkey_deferring_client] [valkey_deferring_client] [valkey_deferring_client]] foreach client $clients { $client SET FOO BAR } @@ -294,7 +294,7 @@ start_server {tags {"pause network"}} { r SET FOO2{t} BAR r exec - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd SET FOO3{t} BAR wait_for_blocked_clients_count 1 50 100 diff --git a/tests/unit/protocol.tcl b/tests/unit/protocol.tcl index e3b4115a8a..cd7dbe9e5a 100644 --- a/tests/unit/protocol.tcl +++ b/tests/unit/protocol.tcl @@ -234,7 +234,7 @@ start_server {tags {"protocol network"}} { start_server {tags {"regression"}} { test "Regression for a crash with blocking ops and pipelining" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set fd [r channel] set proto "*3\r\n\$5\r\nBLPOP\r\n\$6\r\nnolist\r\n\$1\r\n0\r\n" puts -nonewline $fd $proto$proto diff --git a/tests/unit/pubsub.tcl b/tests/unit/pubsub.tcl index 3797b00c7f..5a2439814f 100644 --- a/tests/unit/pubsub.tcl +++ b/tests/unit/pubsub.tcl @@ -6,7 +6,7 @@ start_server {tags {"pubsub network"}} { } foreach resp {2 3} { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] if {[lsearch $::denytags "resp3"] >= 0} { if {$resp == 3} {continue} } elseif {$::force_resp3} { @@ -42,7 +42,7 @@ start_server {tags {"pubsub network"}} { } test "PUBLISH/SUBSCRIBE basics" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] # subscribe to two channels assert_equal {1 2} [subscribe $rd1 {chan1 chan2}] @@ -67,8 +67,8 @@ start_server {tags {"pubsub network"}} { } test "PUBLISH/SUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] assert_equal {1} [subscribe $rd1 {chan1}] assert_equal {1} [subscribe $rd2 {chan1}] @@ -82,7 +82,7 @@ start_server {tags {"pubsub network"}} { } test "PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1 2 3} [subscribe $rd1 {chan1 chan2 chan3}] unsubscribe $rd1 assert_equal 0 [r publish chan1 hello] @@ -94,7 +94,7 @@ start_server {tags {"pubsub network"}} { } test "SUBSCRIBE to one channel more than once" { - set rd1 
[redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1 1 1} [subscribe $rd1 {chan1 chan1 chan1}] assert_equal 1 [r publish chan1 hello] assert_equal {message chan1 hello} [$rd1 read] @@ -104,7 +104,7 @@ start_server {tags {"pubsub network"}} { } test "UNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {0 0 0} [unsubscribe $rd1 {foo bar quux}] # clean up clients @@ -112,7 +112,7 @@ start_server {tags {"pubsub network"}} { } test "PUBLISH/PSUBSCRIBE basics" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] # subscribe to two patterns assert_equal {1 2} [psubscribe $rd1 {foo.* bar.*}] @@ -140,8 +140,8 @@ start_server {tags {"pubsub network"}} { } test "PUBLISH/PSUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 {chan.*}] assert_equal {1} [psubscribe $rd2 {chan.*}] @@ -155,7 +155,7 @@ start_server {tags {"pubsub network"}} { } test "PUBLISH/PSUBSCRIBE after PUNSUBSCRIBE without arguments" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1 2 3} [psubscribe $rd1 {chan1.* chan2.* chan3.*}] punsubscribe $rd1 assert_equal 0 [r publish chan1.hi hello] @@ -167,7 +167,7 @@ start_server {tags {"pubsub network"}} { } test "PubSub messages with CLIENT REPLY OFF" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd hello 3 $rd read ;# Discard the hello reply @@ -191,7 +191,7 @@ start_server {tags {"pubsub network"}} { } {0} {resp3} test "PUNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}] # clean up clients @@ -203,8 +203,8 @@ start_server {tags {"pubsub network"}} { } {abc 0 def 0} test "NUMPATs returns the number of unique patterns" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] # Three unique patterns and one that overlaps psubscribe $rd1 "foo*" @@ -223,7 +223,7 @@ start_server {tags {"pubsub network"}} { } test "Mix SUBSCRIBE and PSUBSCRIBE" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [subscribe $rd1 {foo.bar}] assert_equal {2} [psubscribe $rd1 {foo.*}] @@ -249,7 +249,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: we receive keyspace notifications" { r config set notify-keyspace-events KA - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] $rd1 CLIENT REPLY OFF ;# Make sure it works even if replies are silenced assert_equal {1} [psubscribe $rd1 *] r set foo bar @@ -259,7 +259,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: we receive keyevent notifications" { r config set notify-keyspace-events EA - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] $rd1 CLIENT REPLY SKIP ;# Make sure it works even if replies are silenced assert_equal {1} [psubscribe $rd1 *] r set foo bar @@ -269,7 +269,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: we can receive both kind of events" { r config set notify-keyspace-events KEA - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] $rd1 CLIENT REPLY ON ;# Just coverage assert_equal {OK} [$rd1 read] assert_equal {1} [psubscribe $rd1 *] @@ 
-282,7 +282,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: we are able to mask events" { r config set notify-keyspace-events KEl r del mylist - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r set foo bar r lpush mylist a @@ -294,7 +294,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: general events test" { r config set notify-keyspace-events KEg - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r set foo bar r expire foo 1 @@ -309,7 +309,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: list events test" { r config set notify-keyspace-events KEl r del mylist - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r lpush mylist a r rpush mylist a @@ -326,7 +326,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: set events test" { r config set notify-keyspace-events Ks r del myset - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r sadd myset a b c d r srem myset x @@ -341,7 +341,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: zset events test" { r config set notify-keyspace-events Kz r del myzset - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r zadd myzset 1 a 2 b r zrem myzset x @@ -356,7 +356,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: hash events test" { r config set notify-keyspace-events Kh r del myhash - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r hmset myhash yes 1 no 0 r hincrby myhash yes 10 @@ -368,7 +368,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: stream events test" { r config set notify-keyspace-events Kt r del mystream - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r xgroup create mystream mygroup $ mkstream r xgroup createconsumer mystream mygroup Bob @@ -392,7 +392,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: expired events (triggered expire)" { r config set notify-keyspace-events Ex r del foo - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r psetex foo 100 1 wait_for_condition 50 100 { @@ -407,7 +407,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: expired events (background expire)" { r config set notify-keyspace-events Ex r del foo - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r psetex foo 100 1 assert_equal "pmessage * __keyevent@${db}__:expired foo" [$rd1 read] @@ -418,7 +418,7 @@ start_server {tags {"pubsub network"}} { r config set notify-keyspace-events Ee r config set maxmemory-policy allkeys-lru r flushdb - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r set foo bar r config set maxmemory 1 @@ -441,7 +441,7 @@ start_server {tags {"pubsub network"}} { test "Keyspace notifications: new key test" { r config set notify-keyspace-events En - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [psubscribe $rd1 *] r set foo bar # second set of foo should not cause a 'new' event diff --git 
a/tests/unit/pubsubshard.tcl b/tests/unit/pubsubshard.tcl index 6e3fb61c1c..d56f36ffaa 100644 --- a/tests/unit/pubsubshard.tcl +++ b/tests/unit/pubsubshard.tcl @@ -1,6 +1,6 @@ start_server {tags {"pubsubshard external:skip"}} { test "SPUBLISH/SSUBSCRIBE basics" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] # subscribe to two channels assert_equal {1} [ssubscribe $rd1 {chan1}] @@ -26,8 +26,8 @@ start_server {tags {"pubsubshard external:skip"}} { } test "SPUBLISH/SSUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] assert_equal {1} [ssubscribe $rd1 {chan1}] assert_equal {1} [ssubscribe $rd2 {chan1}] @@ -41,7 +41,7 @@ start_server {tags {"pubsubshard external:skip"}} { } test "SPUBLISH/SSUBSCRIBE after UNSUBSCRIBE without arguments" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1} [ssubscribe $rd1 {chan1}] assert_equal {2} [ssubscribe $rd1 {chan2}] assert_equal {3} [ssubscribe $rd1 {chan3}] @@ -55,7 +55,7 @@ start_server {tags {"pubsubshard external:skip"}} { } test "SSUBSCRIBE to one channel more than once" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {1 1 1} [ssubscribe $rd1 {chan1 chan1 chan1}] assert_equal 1 [r SPUBLISH chan1 hello] assert_equal {smessage chan1 hello} [$rd1 read] @@ -65,7 +65,7 @@ start_server {tags {"pubsubshard external:skip"}} { } test "SUNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] + set rd1 [valkey_deferring_client] assert_equal {0} [sunsubscribe $rd1 {foo}] assert_equal {0} [sunsubscribe $rd1 {bar}] assert_equal {0} [sunsubscribe $rd1 {quux}] @@ -79,8 +79,8 @@ start_server {tags {"pubsubshard external:skip"}} { } {abc 0 def 0} test "SPUBLISH/SSUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] assert_equal {1} [ssubscribe $rd1 {chan1}] assert_equal {1} [ssubscribe $rd2 {chan1}] @@ -94,8 +94,8 @@ start_server {tags {"pubsubshard external:skip"}} { } test "SPUBLISH/SSUBSCRIBE with PUBLISH/SUBSCRIBE" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] assert_equal {1} [ssubscribe $rd1 {chan1}] assert_equal {1} [subscribe $rd2 {chan1}] @@ -111,7 +111,7 @@ start_server {tags {"pubsubshard external:skip"}} { } test "PubSubShard with CLIENT REPLY OFF" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd hello 3 $rd read ;# Discard the hello reply @@ -151,8 +151,8 @@ start_server {tags {"pubsubshard external:skip"}} { } test {publish message to master and receive on replica} { - set rd0 [redis_deferring_client node_0_host node_0_port] - set rd1 [redis_deferring_client node_1_host node_1_port] + set rd0 [valkey_deferring_client node_0_host node_0_port] + set rd1 [valkey_deferring_client node_1_host node_1_port] assert_equal {1} [ssubscribe $rd1 {chan1}] $rd0 SPUBLISH chan1 hello diff --git a/tests/unit/scan.tcl b/tests/unit/scan.tcl index d980a52adb..49939288fb 100644 --- a/tests/unit/scan.tcl +++ b/tests/unit/scan.tcl @@ -109,7 +109,7 @@ proc test_scan {type} { after 2 - # TODO: remove this in redis 8.0 + # TODO: remove this in server version 8.0 set cur 0 set keys {} while 1 { @@ -124,7 +124,7 @@ proc test_scan {type} { # make sure that expired key have been removed by scan command 
assert_equal 1000 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d] - # TODO: uncomment in redis 8.0 + # TODO: uncomment in server version 8.0 #assert_error "*unknown type name*" {r scan 0 type "string1"} # expired key will be no touched by scan command #assert_equal 1001 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d] @@ -193,7 +193,7 @@ proc test_scan {type} { # make sure that expired key have been removed by scan command assert_equal 1000 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d] - # TODO: uncomment in redis 8.0 + # TODO: uncomment in server version 8.0 # make sure that only the expired key in the type match will been removed by scan command #assert_equal 1001 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d] diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index feef69122d..65927dffa6 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -377,7 +377,7 @@ start_server {tags {"scripting"}} { test {EVAL - JSON numeric decoding} { # We must return the table as a string because otherwise - # Redis converts floats to ints and we get 0 and 1023 instead + # the server converts floats to ints and we get 0 and 1023 instead # of 0.0003 and 1023.2 as the parsed output. run_script {return table.concat( @@ -770,7 +770,7 @@ start_server {tags {"scripting"}} { r script flush ;# reset Lua VM r set x 0 # Use a non blocking client to speedup the loop. - set rd [redis_deferring_client] + set rd [valkey_deferring_client] for {set j 0} {$j < 10000} {incr j} { run_script_on_connection $rd {return redis.call("incr",KEYS[1])} 1 x } @@ -1138,7 +1138,7 @@ start_server {tags {"scripting"}} { # instance at all. start_server {tags {"scripting"}} { test {Timedout read-only scripts can be killed by SCRIPT KILL} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r config set lua-time-limit 10 run_script_on_connection $rd {while true do end} 0 after 200 @@ -1151,7 +1151,7 @@ start_server {tags {"scripting"}} { } test {Timedout read-only scripts can be killed by SCRIPT KILL even when use pcall} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r config set lua-time-limit 10 run_script_on_connection $rd {local f = function() while 1 do redis.call('ping') end end while 1 do pcall(f) end} 0 @@ -1179,7 +1179,7 @@ start_server {tags {"scripting"}} { } test {Timedout script does not cause a false dead client} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r config set lua-time-limit 10 # senging (in a pipeline): @@ -1240,8 +1240,8 @@ start_server {tags {"scripting"}} { r config set appendonly yes # create clients, and set one to block waiting for key 'x' - set rd [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd [valkey_deferring_client] + set rd2 [valkey_deferring_client] set r3 [redis_client] $rd2 blpop x 0 wait_for_blocked_clients_count 1 @@ -1280,7 +1280,7 @@ start_server {tags {"scripting"}} { } {OK} {external:skip needs:debug} test {Timedout scripts that modified data can't be killed by SCRIPT KILL} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r config set lua-time-limit 10 run_script_on_connection $rd {redis.call('set',KEYS[1],'y'); while true do end} 1 x after 200 @@ -1300,7 +1300,7 @@ start_server {tags {"scripting"}} { assert_match {BUSY*} $e catch {r shutdown nosave} # Make sure the server was killed - catch {set rd [redis_deferring_client]} e + catch {set rd [valkey_deferring_client]} e assert_match 
{*connection refused*} $e } {} {external:skip} } @@ -1348,7 +1348,7 @@ start_server {tags {"scripting"}} { } ;# is_eval test "Replication of script multiple pushes to list with BLPOP" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd brpop a 0 run_script { redis.call("lpush",KEYS[1],"1"); @@ -1414,7 +1414,7 @@ start_server {tags {"scripting repl external:skip"}} { } } - # replicate_commands is the default on Redis Function + # replicate_commands is the default on server Functions test "Redis.replicate_commands() can be issued anywhere now" { r eval { redis.call('set','foo','bar'); @@ -2125,7 +2125,7 @@ start_server {tags {"scripting"}} { # run a slow script that does one write, then waits for INFO to indicate # that the replica dropped, and then runs another write - set rd [redis_deferring_client -1] + set rd [valkey_deferring_client -1] $rd eval { redis.call('set','x',"script value") while true do @@ -2227,7 +2227,7 @@ start_server {tags {"scripting"}} { test "Consistent eval error reporting" { r config resetstat r config set maxmemory 1 - # Script aborted due to Redis state (OOM) should report script execution error with detailed internal error + # Script aborted due to server state (OOM) should report script execution error with detailed internal error assert_error {OOM command not allowed when used memory > 'maxmemory'*} { r eval {return redis.call('set','x','y')} 1 x } @@ -2236,7 +2236,7 @@ start_server {tags {"scripting"}} { assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r] assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r] - # redis.pcall() failure due to Redis state (OOM) returns lua error table with Redis error message without '-' prefix + # redis.pcall() failure due to server state (OOM) returns lua error table with server error message without '-' prefix r config resetstat assert_equal [ r eval { @@ -2268,7 +2268,7 @@ start_server {tags {"scripting"}} { r config set maxmemory 0 r config resetstat - # Script aborted due to error result of Redis command + # Script aborted due to error result of server command assert_error {ERR DB index is out of range*} { r eval {return redis.call('select',99)} 0 } @@ -2277,7 +2277,7 @@ start_server {tags {"scripting"}} { assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat select r] assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r] - # redis.pcall() failure due to error in Redis command returns lua error table with redis error message without '-' prefix + # redis.pcall() failure due to error in server command returns lua error table with server error message without '-' prefix r config resetstat assert_equal [ r eval { @@ -2304,7 +2304,7 @@ start_server {tags {"scripting"}} { assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r] assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval_ro r] - # redis.pcall() failure due to scripting specific error state (write cmd with eval_ro) returns lua error table with Redis error message without '-' prefix + # redis.pcall() failure due to scripting specific error state (write cmd with eval_ro) returns lua error table with server error message without '-' prefix r config resetstat assert_equal [ r eval_ro { diff --git a/tests/unit/shutdown.tcl b/tests/unit/shutdown.tcl index 7504851a13..79606c5aea 100644 --- a/tests/unit/shutdown.tcl +++ b/tests/unit/shutdown.tcl @@ -22,7 +22,7 @@ start_server {tags {"shutdown external:skip"}} { catch {r shutdown nosave} # Make sure the 
server was killed - catch {set rd [redis_deferring_client]} e + catch {set rd [valkey_deferring_client]} e assert_match {*connection refused*} $e # Temp rdb file must be deleted diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl index e7f82ce7f9..547fa2d452 100644 --- a/tests/unit/slowlog.tcl +++ b/tests/unit/slowlog.tcl @@ -143,7 +143,7 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { assert_equal {INCRBYFLOAT A 1.0} [lindex [lindex [r slowlog get] 0] 3] # blocked BLPOP is replicated as LPOP - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd blpop l 0 wait_for_blocked_clients_count 1 50 100 r multi @@ -231,7 +231,7 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { # Cleanup first r del mylist # create a test client - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # config the slowlog and reset r config set slowlog-log-slower-than 0 diff --git a/tests/unit/sort.tcl b/tests/unit/sort.tcl index a46f77cf9b..397e7e12ea 100644 --- a/tests/unit/sort.tcl +++ b/tests/unit/sort.tcl @@ -12,7 +12,7 @@ start_server { r del tosort for {set i 0} {$i < $num} {incr i} { # Make sure all the weights are different because - # Redis does not use a stable sort but Tcl does. + # the server does not use a stable sort but Tcl does. while 1 { randpath { set rint [expr int(rand()*1000000)] diff --git a/tests/unit/tracking.tcl b/tests/unit/tracking.tcl index 666b5930e4..427cc580dd 100644 --- a/tests/unit/tracking.tcl +++ b/tests/unit/tracking.tcl @@ -2,7 +2,7 @@ start_server {tags {"tracking network logreqres:skip"}} { # Create a deferred client we'll use to redirect invalidation # messages to. - set rd_redirection [redis_deferring_client] + set rd_redirection [valkey_deferring_client] $rd_redirection client id set redir_id [$rd_redirection read] $rd_redirection subscribe __redis__:invalidate @@ -10,7 +10,7 @@ start_server {tags {"tracking network logreqres:skip"}} { # Create another client that's not used as a redirection client # We should always keep this client's buffer clean - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # Client to be used for SET and GET commands # We don't read this client's buffer @@ -24,8 +24,8 @@ start_server {tags {"tracking network logreqres:skip"}} { r CLIENT TRACKING off $rd QUIT $rd_redirection QUIT - set rd [redis_deferring_client] - set rd_redirection [redis_deferring_client] + set rd [valkey_deferring_client] + set rd_redirection [valkey_deferring_client] $rd_redirection client id set redir_id [$rd_redirection read] $rd_redirection subscribe __redis__:invalidate @@ -269,7 +269,7 @@ start_server {tags {"tracking network logreqres:skip"}} { assert_equal PONG [r read] # Reinstantiating after QUIT - set rd_redirection [redis_deferring_client] + set rd_redirection [valkey_deferring_client] $rd_redirection CLIENT ID set redir_id [$rd_redirection read] $rd_redirection SUBSCRIBE __redis__:invalidate @@ -745,7 +745,7 @@ start_server {tags {"tracking network logreqres:skip"}} { test {Regression test for #11715} { # This issue manifests when a client invalidates keys through the max key - # limit, which invalidates keys to get Redis below the limit, but no command is + # limit, which invalidates keys to get the server below the limit, but no command is # then executed. This can occur in several ways but the simplest is through # multi-exec which queues commands. 
clean_all @@ -814,7 +814,7 @@ start_server {tags {"tracking network logreqres:skip"}} { test {RESP3 based basic redirect invalidation with client reply off} { clean_all - set rd_redir [redis_deferring_client] + set rd_redir [valkey_deferring_client] $rd_redir hello 3 $rd_redir read @@ -880,7 +880,7 @@ start_server {tags {"tracking network logreqres:skip"}} { # run the full tracking unit in that mode start_server {tags {"tracking network"}} { test {Coverage: Basic CLIENT CACHING} { - set rd_redirection [redis_deferring_client] + set rd_redirection [valkey_deferring_client] $rd_redirection client id set redir_id [$rd_redirection read] assert_equal {OK} [r CLIENT TRACKING on OPTIN REDIRECT $redir_id] diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl index 5ea62cb910..d9870b8062 100644 --- a/tests/unit/type/list.tcl +++ b/tests/unit/type/list.tcl @@ -645,7 +645,7 @@ foreach {type large} [array get largevalue] { foreach {type large} [array get largevalue] { foreach {pop} {BLPOP BLMPOP_LEFT} { test "$pop: single existing list - $type" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] create_$type blist "a b $large c d" bpop_command $rd $pop blist 1 @@ -671,7 +671,7 @@ foreach {type large} [array get largevalue] { } test "$pop: multiple existing lists - $type" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] create_$type blist1{t} "a $large c" create_$type blist2{t} "d $large f" @@ -700,7 +700,7 @@ foreach {type large} [array get largevalue] { } test "$pop: second list has an entry - $type" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist1{t} create_$type blist2{t} "d $large f" @@ -722,7 +722,7 @@ foreach {type large} [array get largevalue] { r del target{t} r rpush target{t} bar - set rd [redis_deferring_client] + set rd [valkey_deferring_client] create_$type blist{t} "a b $large c d" $rd brpoplpush blist{t} target{t} 1 @@ -739,7 +739,7 @@ foreach {type large} [array get largevalue] { r del target{t} r rpush target{t} bar - set rd [redis_deferring_client] + set rd [valkey_deferring_client] create_$type blist{t} "a b $large c d" $rd blmove blist{t} target{t} $wherefrom $whereto 1 @@ -766,7 +766,7 @@ foreach {type large} [array get largevalue] { foreach {pop} {BLPOP BLMPOP_LEFT} { test "$pop, LPUSH + DEL should not awake blocked client" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del list bpop_command $rd $pop list 0 @@ -783,7 +783,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "$pop, LPUSH + DEL + SET should not awake blocked client" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del list bpop_command $rd $pop list 0 @@ -802,7 +802,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "BLPOP with same key multiple times should work (issue #801)" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del list1{t} list2{t} # Data arriving after the BLPOP. 
@@ -827,7 +827,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { foreach {pop} {BLPOP BLMPOP_LEFT} { test "MULTI/EXEC is isolated from the point of view of $pop" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del list bpop_command $rd $pop list 0 @@ -843,7 +843,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "$pop with variadic LPUSH" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist bpop_command $rd $pop blist 0 wait_for_blocked_client @@ -855,7 +855,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "BRPOPLPUSH with zero timeout should block indefinitely" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist{t} target{t} r rpush target{t} bar $rd brpoplpush blist{t} target{t} 0 @@ -869,7 +869,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { foreach wherefrom {left right} { foreach whereto {left right} { test "BLMOVE $wherefrom $whereto with zero timeout should block indefinitely" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist{t} target{t} r rpush target{t} bar $rd blmove blist{t} target{t} $wherefrom $whereto 0 @@ -889,8 +889,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { foreach wherefrom {left right} { foreach whereto {left right} { test "BLMOVE ($wherefrom, $whereto) with a client BLPOPing the target list" { - set rd [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd [valkey_deferring_client] + set rd2 [valkey_deferring_client] r del blist{t} target{t} $rd2 blpop target{t} 0 wait_for_blocked_clients_count 1 @@ -907,7 +907,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "BRPOPLPUSH with wrong source type" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist{t} target{t} r set blist{t} nolist $rd brpoplpush blist{t} target{t} 1 @@ -916,7 +916,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "BRPOPLPUSH with wrong destination type" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist{t} target{t} r set target{t} nolist r lpush blist{t} foo @@ -924,7 +924,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { assert_error "WRONGTYPE*" {$rd read} $rd close - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist{t} target{t} r set target{t} nolist $rd brpoplpush blist{t} target{t} 0 @@ -936,7 +936,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "BRPOPLPUSH maintains order of elements after failure" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist{t} target{t} r set target{t} nolist $rd brpoplpush blist{t} target{t} 0 @@ -948,8 +948,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } {a b c} test "BRPOPLPUSH with multiple blocked clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] r del blist{t} target1{t} target2{t} r set target1{t} nolist $rd1 brpoplpush blist{t} target1{t} 0 @@ -966,10 +966,10 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "BLMPOP with multiple blocked clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] - set rd4 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] + set rd3 [valkey_deferring_client] + set rd4 [valkey_deferring_client] r del blist{t} blist2{t} $rd1 blmpop 0 2 blist{t} blist2{t} left count 1 @@ -999,8 +999,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "Linked LMOVEs" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set 
rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] r del list1{t} list2{t} list3{t} @@ -1019,8 +1019,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "Circular BRPOPLPUSH" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] r del list1{t} list2{t} @@ -1038,7 +1038,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "Self-referential BRPOPLPUSH" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist{t} @@ -1066,8 +1066,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } {foo bar {} {} {bar foo}} test "PUSH resulting from BRPOPLPUSH affect WATCH" { - set blocked_client [redis_deferring_client] - set watching_client [redis_deferring_client] + set blocked_client [valkey_deferring_client] + set watching_client [valkey_deferring_client] r del srclist{t} dstlist{t} somekey{t} r set somekey{t} somevalue $blocked_client brpoplpush srclist{t} dstlist{t} 0 @@ -1087,8 +1087,8 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } {} test "BRPOPLPUSH does not affect WATCH while still blocked" { - set blocked_client [redis_deferring_client] - set watching_client [redis_deferring_client] + set blocked_client [valkey_deferring_client] + set watching_client [valkey_deferring_client] r del srclist{t} dstlist{t} somekey{t} r set somekey{t} somevalue $blocked_client brpoplpush srclist{t} dstlist{t} 0 @@ -1109,7 +1109,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } {somevalue} test {BRPOPLPUSH timeout} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd brpoplpush foo_list{t} bar_list{t} 1 wait_for_blocked_clients_count 1 @@ -1124,7 +1124,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { r select 1 r rpush k hello r select 9 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd brpop k 5 wait_for_blocked_clients_count 1 r swapdb 1 9 @@ -1138,7 +1138,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { r select 1 r rpush k hello r pexpire k 100 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd deferred 0 $rd select 9 set id [$rd client id] @@ -1184,7 +1184,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { r flushall r debug set-active-expire 0 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set id [$rd read] $rd brpop k 0 @@ -1224,7 +1224,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { test {BLPOP unblock but the key is expired and then block again - reprocessing command} { r flushall r debug set-active-expire 0 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set start [clock milliseconds] $rd blpop mylist 1 @@ -1251,7 +1251,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { foreach {pop} {BLPOP BLMPOP_LEFT} { test "$pop when new key is moved into place" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del foo{t} bpop_command $rd $pop foo{t} 0 @@ -1264,7 +1264,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } {foo{t} hij} test "$pop when result key is created by SORT..STORE" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # zero out list from previous test without explicit delete r lpop foo{t} @@ -1291,7 +1291,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { foreach {pop} {BLPOP BRPOP BLMPOP_LEFT BLMPOP_RIGHT} { test "$pop: with single empty list argument" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist1 bpop_command $rd $pop blist1 1 wait_for_blocked_client @@ -1302,14 +1302,14 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "$pop: with negative timeout" { - set rd 
[redis_deferring_client] + set rd [valkey_deferring_client] bpop_command $rd $pop blist1 -1 assert_error "ERR *is negative*" {$rd read} $rd close } test "$pop: with non-integer timeout" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist1 bpop_command $rd $pop blist1 0.1 r rpush blist1 foo @@ -1321,7 +1321,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { test "$pop: with zero timeout should block indefinitely" { # To test this, use a timeout of 0 and wait a second. # The blocking pop should still be waiting for a push. - set rd [redis_deferring_client] + set rd [valkey_deferring_client] bpop_command $rd $pop blist1 0 wait_for_blocked_client r rpush blist1 foo @@ -1332,7 +1332,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { test "$pop: with 0.001 timeout should not block indefinitely" { # Use a timeout of 0.001 and wait for the number of blocked clients to equal 0. # Validate the empty read from the deferring client. - set rd [redis_deferring_client] + set rd [valkey_deferring_client] bpop_command $rd $pop blist1 0.001 wait_for_blocked_clients_count 0 assert_equal {} [$rd read] @@ -1340,7 +1340,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "$pop: second argument is not a list" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist1{t} blist2{t} r set blist2{t} nolist{t} bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 @@ -1349,7 +1349,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "$pop: timeout" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist1{t} blist2{t} bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 wait_for_blocked_client @@ -1358,7 +1358,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test "$pop: arguments are empty" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del blist1{t} blist2{t} bpop_command_two_key $rd $pop blist1{t} blist2{t} 1 @@ -1393,7 +1393,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} { } test {BLMPOP propagate as pop with count command to replica} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set repl [attach_to_replication_stream] # BLMPOP without being blocked. 
@@ -2009,8 +2009,8 @@ foreach {type large} [array get largevalue] { } test "Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] $rd1 brpoplpush a{t} b{t} 0 $rd1 brpoplpush a{t} b{t} 0 @@ -2025,7 +2025,7 @@ foreach {type large} [array get largevalue] { test "BLPOP/BLMOVE should increase dirty" { r del lst{t} lst1{t} - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set dirty [s rdb_changes_since_last_save] $rd blpop lst{t} 0 @@ -2049,7 +2049,7 @@ foreach {type large} [array get largevalue] { foreach {pop} {BLPOP BLMPOP_RIGHT} { test "client unblock tests" { r del l - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set id [$rd read] @@ -2261,8 +2261,8 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} { } {12 0 9223372036854775808 2147483647 32767 127} test "Unblock fairness is kept while pipelining" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] # delete the list in case already exists r del mylist @@ -2296,9 +2296,9 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} { } test "Unblock fairness is kept during nested unblock" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] + set rd3 [valkey_deferring_client] # delete the list in case already exists r del l1{t} l2{t} l3{t} @@ -2334,7 +2334,7 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} { r del mylist # create a test client - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # reset the server stats r config resetstat @@ -2357,7 +2357,7 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} { r del mylist # create a test client - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd client id set id [$rd read] @@ -2380,9 +2380,9 @@ foreach {pop} {BLPOP BLMPOP_RIGHT} { r del src{t} dst{t} key1{t} key2{t} key3{t} set repl [attach_to_replication_stream] - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] + set rd3 [valkey_deferring_client] $rd1 blmove src{t} dst{t} left right 0 wait_for_blocked_clients_count 1 diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl index a86488a78c..2cd812e521 100644 --- a/tests/unit/type/stream-cgroups.tcl +++ b/tests/unit/type/stream-cgroups.tcl @@ -221,7 +221,7 @@ start_server { assert {[lindex $res 0 1 0] == {666-0 {f v}}} r XADD mystream 667 f2 v2 r XDEL mystream 667 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Alice BLOCK 10 STREAMS mystream ">" wait_for_blocked_clients_count 0 assert {[$rd read] == {}} ;# before the fix, client didn't even block, but was served synchronously with {mystream {}} @@ -232,7 +232,7 @@ start_server { r DEL mystream r XADD mystream 666 f v r XGROUP CREATE mystream mygroup $ - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">" wait_for_blocked_clients_count 1 r DEL mystream @@ -244,7 +244,7 @@ start_server { r DEL mystream r XADD mystream 666 f v r XGROUP CREATE mystream mygroup $ - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup 
Alice BLOCK 0 STREAMS mystream ">" wait_for_blocked_clients_count 1 r SET mystream val1 @@ -256,7 +256,7 @@ start_server { r DEL mystream r XADD mystream 666 f v r XGROUP CREATE mystream mygroup $ - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">" wait_for_blocked_clients_count 1 r MULTI @@ -271,7 +271,7 @@ start_server { r DEL mystream r XADD mystream 666 f v r XGROUP CREATE mystream mygroup $ - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">" wait_for_blocked_clients_count 1 r FLUSHALL @@ -286,7 +286,7 @@ start_server { r DEL mystream r XADD mystream 666 f v r XGROUP CREATE mystream mygroup $ - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd SELECT 9 $rd read $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">" @@ -304,7 +304,7 @@ start_server { r DEL mystream r XADD mystream 666 f v r XGROUP CREATE mystream mygroup $ - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd SELECT 9 $rd read $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">" @@ -325,7 +325,7 @@ start_server { test {Blocking XREAD: key deleted} { r DEL mystream r XADD mystream 666 f v - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 0 STREAMS mystream "$" wait_for_blocked_clients_count 1 r DEL mystream @@ -339,7 +339,7 @@ start_server { test {Blocking XREAD: key type changed with SET} { r DEL mystream r XADD mystream 666 f v - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 0 STREAMS mystream "$" wait_for_blocked_clients_count 1 r SET mystream val1 @@ -352,7 +352,7 @@ start_server { } test {Blocking XREADGROUP for stream that ran dry (issue #5299)} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # Add a entry then delete it, now stream's last_id is 666. r DEL mystream @@ -378,7 +378,7 @@ start_server { } test "Blocking XREADGROUP will ignore BLOCK if ID is not >" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # Add a entry then delete it, now stream's last_id is 666. 
r DEL mystream @@ -427,8 +427,8 @@ start_server { } test {Blocking XREADGROUP for stream key that has clients blocked on list} { - set rd [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd [valkey_deferring_client] + set rd2 [valkey_deferring_client] # First delete the stream r DEL mystream @@ -479,9 +479,9 @@ start_server { r DEL mystream r XGROUP CREATE mystream mygroup $ MKSTREAM - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] + set rd3 [valkey_deferring_client] $rd1 xreadgroup GROUP mygroup myuser COUNT 10 BLOCK 10000 STREAMS mystream > $rd2 xreadgroup GROUP mygroup myuser COUNT 10 BLOCK 10000 STREAMS mystream > @@ -502,8 +502,8 @@ start_server { r DEL mystream r XGROUP CREATE mystream mygroup $ MKSTREAM - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] $rd1 xreadgroup GROUP mygroup myuser BLOCK 0 STREAMS mystream > wait_for_blocked_clients_count 1 @@ -530,7 +530,7 @@ start_server { r config resetstat r del mystream r XGROUP CREATE mystream mygroup $ MKSTREAM - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">" wait_for_blocked_clients_count 1 r XGROUP DESTROY mystream mygroup @@ -546,7 +546,7 @@ start_server { test {RENAME can unblock XREADGROUP with data} { r del mystream{t} r XGROUP CREATE mystream{t} mygroup $ MKSTREAM - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream{t} ">" wait_for_blocked_clients_count 1 r XGROUP CREATE mystream2{t} mygroup $ MKSTREAM @@ -559,7 +559,7 @@ start_server { test {RENAME can unblock XREADGROUP with -NOGROUP} { r del mystream{t} r XGROUP CREATE mystream{t} mygroup $ MKSTREAM - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream{t} ">" wait_for_blocked_clients_count 1 r XADD mystream2{t} 100 f1 v1 @@ -1015,7 +1015,7 @@ start_server { r XGROUP CREATE mystream mygroup $ MKSTREAM r XADD mystream * f1 v1 r XREADGROUP GROUP mygroup Alice NOACK STREAMS mystream ">" - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Bob BLOCK 0 NOACK STREAMS mystream ">" wait_for_blocked_clients_count 1 r XADD mystream * f2 v2 @@ -1036,7 +1036,7 @@ start_server { r XGROUP CREATE mystream mygroup $ MKSTREAM r XADD mystream * f v r XREADGROUP GROUP mygroup Alice NOACK STREAMS mystream ">" - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREADGROUP GROUP mygroup Bob BLOCK 0 NOACK STREAMS mystream ">" wait_for_blocked_clients_count 1 r XGROUP CREATECONSUMER mystream mygroup Charlie diff --git a/tests/unit/type/stream.tcl b/tests/unit/type/stream.tcl index 06f58c8a2f..0c56dcd32c 100644 --- a/tests/unit/type/stream.tcl +++ b/tests/unit/type/stream.tcl @@ -32,7 +32,7 @@ proc streamRandomID {min_id max_id} { return $ms-$seq } -# Tcl-side implementation of XRANGE to perform fuzz testing in the Redis +# Tcl-side implementation of XRANGE to perform fuzz testing in the server # XRANGE implementation. 
proc streamSimulateXRANGE {items start end} { set res {} @@ -337,7 +337,7 @@ start_server { test {Blocking XREAD waiting new data} { r XADD s2{t} * old abcd1234 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 20000 STREAMS s1{t} s2{t} s3{t} $ $ $ wait_for_blocked_client r XADD s2{t} * new abcd1234 @@ -348,7 +348,7 @@ start_server { } test {Blocking XREAD waiting old data} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 20000 STREAMS s1{t} s2{t} s3{t} $ 0-0 $ r XADD s2{t} * foo abcd1234 set res [$rd read] @@ -362,7 +362,7 @@ start_server { r XADD s1 666 f v r XADD s1 667 f2 v2 r XDEL s1 667 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 10 STREAMS s1 666 after 20 assert {[$rd read] == {}} ;# before the fix, client didn't even block, but was served synchronously with {s1 {}} @@ -370,7 +370,7 @@ start_server { } test "Blocking XREAD for stream that ran dry (issue #5299)" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] # Add an entry then delete it, now stream's last_id is 666. r DEL mystream @@ -444,7 +444,7 @@ start_server { r DEL lestream # read last entry from stream, blocking - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 20000 STREAMS lestream + wait_for_blocked_client @@ -511,7 +511,7 @@ start_server { } test "XREAD: XADD + DEL should not awake client" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del s1 $rd XREAD BLOCK 20000 STREAMS s1 $ wait_for_blocked_clients_count 1 @@ -527,7 +527,7 @@ start_server { } test "XREAD: XADD + DEL + LPUSH should not awake client" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del s1 $rd XREAD BLOCK 20000 STREAMS s1 $ wait_for_blocked_clients_count 1 @@ -546,7 +546,7 @@ start_server { test {XREAD with same stream name multiple times should work} { r XADD s2 * old abcd1234 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 20000 STREAMS s2 s2 s2 $ $ $ wait_for_blocked_clients_count 1 r XADD s2 * new abcd1234 @@ -558,7 +558,7 @@ start_server { test {XREAD + multiple XADD inside transaction} { r XADD s2 * old abcd1234 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 20000 STREAMS s2 s2 s2 $ $ $ wait_for_blocked_clients_count 1 r MULTI @@ -682,7 +682,7 @@ start_server { test {XREAD streamID edge (blocking)} { r del x - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd XREAD BLOCK 0 STREAMS x 1-18446744073709551615 wait_for_blocked_clients_count 1 r XADD x 1-1 f v diff --git a/tests/unit/type/string.tcl b/tests/unit/type/string.tcl index 94702ec3dc..381cc4a693 100644 --- a/tests/unit/type/string.tcl +++ b/tests/unit/type/string.tcl @@ -501,7 +501,7 @@ if {[string match {*jemalloc*} [s mem_allocator]]} { test {trim on SET with big value} { # set a big value to trigger increasing the query buf r set key [string repeat A 100000] - # set a smaller value but > PROTO_MBULK_BIG_ARG (32*1024) Redis will try to save the query buf itself on the DB. + # set a smaller value, but still > PROTO_MBULK_BIG_ARG (32*1024), so the server will try to save the query buf itself on the DB. r set key [string repeat A 33000] # assert the value was trimmed assert {[r memory usage key] < 42000}; # 42K to count for Jemalloc's additional memory overhead.
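The test hunks above and below change nothing but the name of the deferring-client helper (redis_deferring_client becomes valkey_deferring_client); the blocking-test shape they exercise is untouched. As an illustration only, and not part of the patch, a minimal sketch of that shape under the renamed helper could look like the following; the stream key "sketchstream" and the added entry are arbitrary example values, and start_server, wait_for_blocked_clients_count and assert are assumed to be the suite's existing helpers.

start_server {tags {"stream"}} {
    test {Sketch only: blocked XREAD is woken up by a later XADD} {
        r DEL sketchstream
        # Deferring client: the command is sent now, the reply is read later.
        set rd [valkey_deferring_client]
        $rd XREAD BLOCK 0 STREAMS sketchstream "$"
        # Wait until the server reports one blocked client, then feed it data.
        wait_for_blocked_clients_count 1
        r XADD sketchstream 1-1 field value
        set res [$rd read]
        assert {[lindex $res 0 0] eq {sketchstream}}
        $rd close
    }
}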
diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl index 03f451c797..f6c643a5ef 100644 --- a/tests/unit/type/zset.tcl +++ b/tests/unit/type/zset.tcl @@ -1166,7 +1166,7 @@ start_server {tags {"zset"}} { foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} { test "$popmin/$popmax with a single existing sorted set - $encoding" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] create_zset zset {0 a 1 b 2 c 3 d} verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}} @@ -1178,7 +1178,7 @@ start_server {tags {"zset"}} { } test "$popmin/$popmax with multiple existing sorted sets - $encoding" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] create_zset z1{t} {0 a 1 b 2 c} create_zset z2{t} {3 d 4 e 5 f} @@ -1195,7 +1195,7 @@ start_server {tags {"zset"}} { } test "$popmin/$popmax second sorted set has members - $encoding" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del z1{t} create_zset z2{t} {3 d 4 e 5 f} @@ -1228,7 +1228,7 @@ start_server {tags {"zset"}} { foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} { test "$popmin/$popmax - $encoding RESP3" { r hello 3 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] create_zset zset {0 a 1 b 2 c 3 d} verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}} @@ -1334,7 +1334,7 @@ start_server {tags {"zset"}} { } {} {needs:repl} foreach resp {3 2} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] if {[lsearch $::denytags "resp3"] >= 0} { if {$resp == 3} {continue} @@ -1826,7 +1826,7 @@ start_server {tags {"zset"}} { # Make sure data is the same in both sides assert {[r zrange zset 0 -1] eq $lexset} - # Get the Redis output + # Get the server output set output [r $cmd zset $cmin $cmax] if {$rev} { set outlen [r zlexcount zset $cmax $cmin] @@ -1842,7 +1842,7 @@ start_server {tags {"zset"}} { # Empty output when ranges are inverted. } else { if {$rev} { - # Invert the Tcl array using Redis itself. + # Invert the Tcl array using the server itself. set copy [r zrevrange zset 0 -1] # Invert min / max as well lassign [list $min $max $mininc $maxinc] \ @@ -1952,7 +1952,7 @@ start_server {tags {"zset"}} { foreach {pop} {BZPOPMIN BZMPOP_MIN} { test "$pop, ZADD + DEL should not awake blocked client" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del zset bzpop_command $rd $pop zset 0 @@ -1970,7 +1970,7 @@ start_server {tags {"zset"}} { } test "$pop, ZADD + DEL + SET should not awake blocked client" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del zset bzpop_command $rd $pop zset 0 @@ -1992,7 +1992,7 @@ start_server {tags {"zset"}} { test {BZPOPMIN unblock but the key is expired and then block again - reprocessing command} { r flushall r debug set-active-expire 0 - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set start [clock milliseconds] $rd bzpopmin zset{t} 1 @@ -2018,7 +2018,7 @@ start_server {tags {"zset"}} { } {0} {needs:debug} test "BZPOPMIN with same key multiple times should work" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del z1{t} z2{t} # Data arriving after the BZPOPMIN. 
@@ -2043,7 +2043,7 @@ start_server {tags {"zset"}} { foreach {pop} {BZPOPMIN BZMPOP_MIN} { test "MULTI/EXEC is isolated from the point of view of $pop" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del zset bzpop_command $rd $pop zset 0 @@ -2060,7 +2060,7 @@ start_server {tags {"zset"}} { } test "$pop with variadic ZADD" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del zset if {$::valgrind} {after 100} bzpop_command $rd $pop zset 0 @@ -2074,7 +2074,7 @@ start_server {tags {"zset"}} { } test "$pop with zero timeout should block indefinitely" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] r del zset bzpop_command $rd $pop zset 0 wait_for_blocked_client @@ -2132,10 +2132,10 @@ start_server {tags {"zset"}} { } test "BZMPOP with multiple blocked clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - set rd3 [redis_deferring_client] - set rd4 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] + set rd3 [valkey_deferring_client] + set rd4 [valkey_deferring_client] r del myzset{t} myzset2{t} $rd1 bzmpop 0 2 myzset{t} myzset2{t} min count 1 @@ -2167,7 +2167,7 @@ start_server {tags {"zset"}} { } test "BZMPOP propagate as pop with count command to replica" { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] set repl [attach_to_replication_stream] # BZMPOP without being blocked. @@ -2213,8 +2213,8 @@ start_server {tags {"zset"}} { } {} {needs:repl} test "BZMPOP should not blocks on non key arguments - #10762" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd1 [valkey_deferring_client] + set rd2 [valkey_deferring_client] r del myzset myzset2 myzset3 $rd1 bzmpop 0 1 myzset min count 10 diff --git a/tests/unit/wait.tcl b/tests/unit/wait.tcl index 0f20ef87e9..dfd0be7483 100644 --- a/tests/unit/wait.tcl +++ b/tests/unit/wait.tcl @@ -70,8 +70,8 @@ start_server {} { } test {WAIT replica multiple clients unblock - reuse last result} { - set rd [redis_deferring_client -1] - set rd2 [redis_deferring_client -1] + set rd [valkey_deferring_client -1] + set rd2 [valkey_deferring_client -1] pause_process $slave_pid @@ -125,7 +125,7 @@ tags {"wait aof network external:skip"} { test {WAITAOF local wait and then stop aof} { r config set appendfsync no - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd incr foo $rd read $rd waitaof 1 0 0 @@ -187,7 +187,7 @@ tags {"wait aof network external:skip"} { $replica config set appendfsync no test {WAITAOF on demoted master gets unblocked with an error} { - set rd [redis_deferring_client] + set rd [valkey_deferring_client] $rd incr foo $rd read $rd waitaof 0 1 0 @@ -268,8 +268,8 @@ tags {"wait aof network external:skip"} { } test {WAITAOF replica multiple clients unblock - reuse last result} { - set rd [redis_deferring_client -1] - set rd2 [redis_deferring_client -1] + set rd [valkey_deferring_client -1] + set rd2 [valkey_deferring_client -1] pause_process $replica_pid @@ -311,7 +311,7 @@ tags {"wait aof network external:skip"} { } test {WAITAOF master without backlog, wait is released when the replica finishes full-sync} { - set rd [redis_deferring_client -1] + set rd [valkey_deferring_client -1] $rd incr foo $rd read $rd waitaof 0 1 0 @@ -401,8 +401,8 @@ tags {"wait aof network external:skip"} { } # add some writes and block a client on each master - set rd [redis_deferring_client -3] - set rd2 [redis_deferring_client -1] + set rd [valkey_deferring_client 
-3] + set rd2 [valkey_deferring_client -1] $rd set boo 11 $rd2 set boo 22 $rd read @@ -454,8 +454,8 @@ start_server {} { } test {WAIT and WAITAOF replica multiple clients unblock - reuse last result} { - set rd [redis_deferring_client] - set rd2 [redis_deferring_client] + set rd [valkey_deferring_client] + set rd2 [valkey_deferring_client] $master config set appendonly yes $replica1 config set appendonly yes diff --git a/utils/generate-command-code.py b/utils/generate-command-code.py index 673ef28466..05e218e780 100755 --- a/utils/generate-command-code.py +++ b/utils/generate-command-code.py @@ -576,7 +576,7 @@ def create_command(name, desc): """ /* We have fabulous commands from * the fantastic - * Redis Command Table! */ + * Command Table! */ /* Must match serverCommandGroup */ const char *COMMAND_GROUP_STR[] = { diff --git a/utils/hyperloglog/hll-err.rb b/utils/hyperloglog/hll-err.rb index 2c71ac5efc..e04cf93800 100644 --- a/utils/hyperloglog/hll-err.rb +++ b/utils/hyperloglog/hll-err.rb @@ -1,7 +1,7 @@ # hll-err.rb - Copyright (C) 2014 Salvatore Sanfilippo # BSD license, See the COPYING file for more information. # -# Check error of HyperLogLog Redis implementation for different set sizes. +# Check error of HyperLogLog implementation for different set sizes. require 'rubygems' require 'redis' diff --git a/utils/hyperloglog/hll-gnuplot-graph.rb b/utils/hyperloglog/hll-gnuplot-graph.rb index 6c7596d17c..61f0672637 100644 --- a/utils/hyperloglog/hll-gnuplot-graph.rb +++ b/utils/hyperloglog/hll-gnuplot-graph.rb @@ -2,7 +2,7 @@ # BSD license, See the COPYING file for more information. # # This program is suited to output average and maximum errors of -# the Redis HyperLogLog implementation in a format suitable to print +# the HyperLogLog implementation in a format suitable to print # graphs using gnuplot. require 'rubygems' @@ -12,7 +12,7 @@ # Generate an array of [cardinality,relative_error] pairs # in the 0 - max range, with the specified step. # -# 'r' is the Redis object used to perform the queries. +# 'r' is the Object used to perform the queries. # 'seed' must be different every time you want a test performed # with a different set. The function guarantees that if 'seed' is the # same, exactly the same dataset is used, and when it is different, diff --git a/utils/install_server.sh b/utils/install_server.sh index e22d88bb40..6a1f26d647 100755 --- a/utils/install_server.sh +++ b/utils/install_server.sh @@ -25,7 +25,7 @@ # ################################################################################ # -# Service installer for redis server, runs interactively by default. +# Service installer for the server, runs interactively by default. # # To run this script non-interactively (for automation/provisioning purposes), # feed the variables into the script. Any missing variables will be prompted! @@ -37,9 +37,9 @@ # REDIS_CONFIG_FILE=/etc/redis/1234.conf \ # REDIS_LOG_FILE=/var/log/redis_1234.log \ # REDIS_DATA_DIR=/var/lib/redis/1234 \ -# REDIS_EXECUTABLE=`command -v redis-server` ./utils/install_server.sh +# REDIS_EXECUTABLE=`command -v valkey-server` ./utils/install_server.sh # -# This generates a redis config file and an /etc/init.d script, and installs them. +# This generates a server config file and an /etc/init.d script, and installs them. # # /!\ This script should be run as root # @@ -85,7 +85,7 @@ unset _pid_1_exe if ! 
echo $REDIS_PORT | egrep -q '^[0-9]+$' ; then _MANUAL_EXECUTION=true - #Read the redis port + #Read the server port read -p "Please select the redis port for this instance: [$_REDIS_PORT] " REDIS_PORT if ! echo $REDIS_PORT | egrep -q '^[0-9]+$' ; then echo "Selecting default: $_REDIS_PORT" @@ -95,7 +95,7 @@ fi if [ -z "$REDIS_CONFIG_FILE" ] ; then _MANUAL_EXECUTION=true - #read the redis config file + #read the server config file _REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf" read -p "Please select the redis config file name [$_REDIS_CONFIG_FILE] " REDIS_CONFIG_FILE if [ -z "$REDIS_CONFIG_FILE" ] ; then @@ -106,7 +106,7 @@ fi if [ -z "$REDIS_LOG_FILE" ] ; then _MANUAL_EXECUTION=true - #read the redis log file path + #read the server log file path _REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log" read -p "Please select the redis log file name [$_REDIS_LOG_FILE] " REDIS_LOG_FILE if [ -z "$REDIS_LOG_FILE" ] ; then @@ -117,7 +117,7 @@ fi if [ -z "$REDIS_DATA_DIR" ] ; then _MANUAL_EXECUTION=true - #get the redis data directory + #get the server data directory _REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT" read -p "Please select the data directory for this instance [$_REDIS_DATA_DIR] " REDIS_DATA_DIR if [ -z "$REDIS_DATA_DIR" ] ; then @@ -128,7 +128,7 @@ fi if [ ! -x "$REDIS_EXECUTABLE" ] ; then _MANUAL_EXECUTION=true - #get the redis executable path + #get the server executable path _REDIS_EXECUTABLE=`command -v redis-server` read -p "Please select the redis executable path [$_REDIS_EXECUTABLE] " REDIS_EXECUTABLE if [ ! -x "$REDIS_EXECUTABLE" ] ; then @@ -141,7 +141,7 @@ if [ ! -x "$REDIS_EXECUTABLE" ] ; then fi fi -#check the default for redis cli +#check the default for valkey cli CLI_EXEC=`command -v redis-cli` if [ -z "$CLI_EXEC" ] ; then CLI_EXEC=`dirname $REDIS_EXECUTABLE`"/redis-cli" diff --git a/utils/lru/lfu-simulation.c b/utils/lru/lfu-simulation.c index 60105e55b0..59b5e332c0 100644 --- a/utils/lru/lfu-simulation.c +++ b/utils/lru/lfu-simulation.c @@ -8,7 +8,7 @@ int keyspace_size = 1000000; time_t switch_after = 30; /* Switch access pattern after N seconds. */ struct entry { - /* Field that the LFU Redis implementation will have (we have + /* Field that the LFU implementation will have (we have * 24 bits of total space in the object->lru field). */ uint8_t counter; /* Logarithmic counter. */ uint16_t decrtime; /* (Reduced precision) time of last decrement. */ diff --git a/utils/redis-copy.rb b/utils/redis-copy.rb index 7c5c52dd6c..9f8335c341 100644 --- a/utils/redis-copy.rb +++ b/utils/redis-copy.rb @@ -1,7 +1,7 @@ # redis-copy.rb - Copyright (C) 2009-2010 Salvatore Sanfilippo # BSD license, See the COPYING file for more information. # -# Copy the whole dataset from one Redis instance to another one +# Copy the whole dataset from one server instance to another one # # WARNING: this utility is deprecated and serves as a legacy adapter # for the more-robust redis-copy gem. diff --git a/utils/redis_init_script b/utils/redis_init_script index 006db87e5f..da8fee41ff 100755 --- a/utils/redis_init_script +++ b/utils/redis_init_script @@ -1,6 +1,6 @@ #!/bin/sh # -# Simple Redis init.d script conceived to work on Linux systems +# Simple server init.d script conceived to work on Linux systems # as it does use of the /proc filesystem. 
### BEGIN INIT INFO diff --git a/utils/req-res-log-validator.py b/utils/req-res-log-validator.py index e2d471370c..d9d0df71af 100755 --- a/utils/req-res-log-validator.py +++ b/utils/req-res-log-validator.py @@ -159,7 +159,7 @@ def __init__(self, f, line_counter): count = int(line[1:]) for i in range(count): field = Response(f, line_counter) - # Redis allows fields to be non-strings but JSON doesn't. + # The server allows fields to be non-strings but JSON doesn't. # Luckily, for any kind of response we can validate, the fields are # always strings (example: XINFO STREAM) # The reason we can't always convert to string is because of DEBUG PROTOCOL MAP diff --git a/utils/speed-regression.tcl b/utils/speed-regression.tcl index bf35c7db4b..6802d8cd5e 100755 --- a/utils/speed-regression.tcl +++ b/utils/speed-regression.tcl @@ -26,7 +26,7 @@ proc run-tests branches { continue } - # Start the Redis server + # Start the server puts " starting the server... [exec ./redis-server -v]" set pids [exec echo "port $::port\nloglevel warning\n" | ./redis-server - > /dev/null 2> /dev/null &] puts " pids: $pids" @@ -83,7 +83,7 @@ proc combine-results {results} { } proc main {} { - # Note: the first branch is only used in order to get the redis-benchmark + # Note: the first branch is only used in order to get the valkey-benchmark # executable. Tests are performed starting from the second branch. set branches { slowset 2.2.0 2.4.0 unstable slowset diff --git a/utils/systemd-redis_multiple_servers@.service b/utils/systemd-redis_multiple_servers@.service deleted file mode 100644 index 108ccfc64b..0000000000 --- a/utils/systemd-redis_multiple_servers@.service +++ /dev/null @@ -1,37 +0,0 @@ -# example systemd template service unit file for multiple redis-servers -# -# You can use this file as a blueprint for your actual template service unit -# file, if you intend to run multiple independent redis-server instances in -# parallel using systemd's "template unit files" feature. If you do, you will -# want to choose a better basename for your service unit by renaming this file -# when copying it. -# -# Please take a look at the provided "systemd-redis_server.service" example -# service unit file, too, if you choose to use this approach at managing -# multiple redis-server instances via systemd. 
- -[Unit] -Description=Redis data structure server - instance %i -Documentation=https://redis.io/documentation -# This template unit assumes your redis-server configuration file(s) -# to live at /etc/redis/redis_server_.conf -AssertPathExists=/etc/redis/redis_server_%i.conf -#Before=your_application.service another_example_application.service -#AssertPathExists=/var/lib/redis - -[Service] -ExecStart=/usr/local/bin/redis-server /etc/redis/redis_server_%i.conf -LimitNOFILE=10032 -NoNewPrivileges=yes -#OOMScoreAdjust=-900 -#PrivateTmp=yes -Type=notify -TimeoutStartSec=infinity -TimeoutStopSec=infinity -UMask=0077 -#User=redis -#Group=redis -#WorkingDirectory=/var/lib/redis - -[Install] -WantedBy=multi-user.target diff --git a/utils/systemd-valkey_multiple_servers@.service b/utils/systemd-valkey_multiple_servers@.service new file mode 100644 index 0000000000..2d6f4e8982 --- /dev/null +++ b/utils/systemd-valkey_multiple_servers@.service @@ -0,0 +1,37 @@ +# example systemd template service unit file for multiple valkey-servers +# +# You can use this file as a blueprint for your actual template service unit +# file, if you intend to run multiple independent valkey-server instances in +# parallel using systemd's "template unit files" feature. If you do, you will +# want to choose a better basename for your service unit by renaming this file +# when copying it. +# +# Please take a look at the provided "systemd-valkey_server.service" example +# service unit file, too, if you choose to use this approach at managing +# multiple valkey-server instances via systemd. + +[Unit] +Description=Valkey data structure server - instance %i +Documentation=https://github.com/valkey-io/valkey-doc +# This template unit assumes your valkey-server configuration file(s) +# to live at /etc/valkey/valkey_server_.conf +AssertPathExists=/etc/valkey/valkey_server_%i.conf +#Before=your_application.service another_example_application.service +#AssertPathExists=/var/lib/valkey + +[Service] +ExecStart=/usr/local/bin/valkey-server /etc/valkey/valkey_server_%i.conf +LimitNOFILE=10032 +NoNewPrivileges=yes +#OOMScoreAdjust=-900 +#PrivateTmp=yes +Type=notify +TimeoutStartSec=infinity +TimeoutStopSec=infinity +UMask=0077 +#User=valkey +#Group=valkey +#WorkingDirectory=/var/lib/valkey + +[Install] +WantedBy=multi-user.target diff --git a/utils/systemd-redis_server.service b/utils/systemd-valkey_server.service similarity index 65% rename from utils/systemd-redis_server.service rename to utils/systemd-valkey_server.service index 15400b439e..0842105186 100644 --- a/utils/systemd-redis_server.service +++ b/utils/systemd-valkey_server.service @@ -1,13 +1,13 @@ -# example systemd service unit file for redis-server +# example systemd service unit file for valkey-server # -# In order to use this as a template for providing a redis service in your -# environment, _at the very least_ make sure to adapt the redis configuration +# In order to use this as a template for providing a valkey service in your +# environment, _at the very least_ make sure to adapt the valkey configuration # file you intend to use as needed (make sure to set "supervised systemd"), and # to set sane TimeoutStartSec and TimeoutStopSec property values in the unit's # "[Service]" section to fit your needs. # # Some properties, such as User= and Group=, are highly desirable for virtually -# all deployments of redis, but cannot be provided in a manner that fits all +# all deployments of valkey, but cannot be provided in a manner that fits all # expectable environments. 
Some of these properties have been commented out in # this example service unit file, but you are highly encouraged to set them to # fit your needs. @@ -16,16 +16,16 @@ # more information. [Unit] -Description=Redis data structure server -Documentation=https://redis.io/documentation +Description=Valkey data structure server +Documentation=https://github.com/valkey-io/valkey-doc #Before=your_application.service another_example_application.service -#AssertPathExists=/var/lib/redis +#AssertPathExists=/var/lib/valkey Wants=network-online.target After=network-online.target [Service] ExecStart=/usr/local/bin/valkey-server --supervised systemd --daemonize no -## Alternatively, have redis-server load a configuration file: +## Alternatively, have valkey-server load a configuration file: #ExecStart=/usr/local/bin/valkey-server /path/to/your/valkey.conf LimitNOFILE=10032 NoNewPrivileges=yes @@ -35,9 +35,9 @@ Type=notify TimeoutStartSec=infinity TimeoutStopSec=infinity UMask=0077 -#User=redis -#Group=redis -#WorkingDirectory=/var/lib/redis +#User=valkey +#Group=valkey +#WorkingDirectory=/var/lib/valkey [Install] WantedBy=multi-user.target diff --git a/utils/tracking_collisions.c b/utils/tracking_collisions.c index f52111173d..4df3e84af5 100644 --- a/utils/tracking_collisions.c +++ b/utils/tracking_collisions.c @@ -1,6 +1,6 @@ /* This is a small program used in order to understand the collision rate * of CRC64 (ISO version) VS other stronger hashing functions in the context - * of hashing keys for the Redis "tracking" feature (client side caching + * of hashing keys for the "tracking" feature (client side caching * assisted by the server). * * The program attempts to hash keys with common names in the form of diff --git a/valkey.conf b/valkey.conf index 33442b340d..d3073bc21e 100644 --- a/valkey.conf +++ b/valkey.conf @@ -329,7 +329,7 @@ daemonize no # # When the server runs non daemonized, no pid file is created if none is # specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/valkey.pid". +# is used even if not specified, defaulting to "/var/run/redis.pid". # # Creating a pid file is best effort: if the server is not able to create it # nothing bad happens, the server will start and run normally.