diff --git a/deps/Makefile b/deps/Makefile index 67b7d41026..f1e4bd6ce2 100644 --- a/deps/Makefile +++ b/deps/Makefile @@ -79,7 +79,7 @@ ifeq ($(uname_S),SunOS) LUA_CFLAGS= -D__C99FEATURES__=1 endif -LUA_CFLAGS+= -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP $(CFLAGS) +LUA_CFLAGS+= -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DLUA_USE_MKSTEMP $(CFLAGS) LUA_LDFLAGS+= $(LDFLAGS) ifeq ($(LUA_DEBUG),yes) LUA_CFLAGS+= -O0 -g -DLUA_USE_APICHECK diff --git a/src/Makefile b/src/Makefile index 302ad06b84..2217597d1f 100644 --- a/src/Makefile +++ b/src/Makefile @@ -35,7 +35,7 @@ DEPENDENCY_TARGETS=hiredis linenoise lua hdr_histogram fpconv NODEPS:=clean distclean # Default settings -STD=-pedantic -DSERVER_STATIC='' +STD=-pedantic # Use -Wno-c11-extensions on clang, either where explicitly used or on # platforms we can assume it's being used. diff --git a/src/cluster.c b/src/cluster.c index 8aa6793ba8..d9da706c7b 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1449,20 +1449,12 @@ void askingCommand(client *c) { * In this mode replica will not redirect clients as long as clients access * with read-only commands to keys that are served by the replica's primary. */ void readonlyCommand(client *c) { - if (server.cluster_enabled == 0) { - addReplyError(c, "This instance has cluster support disabled"); - return; - } c->flags |= CLIENT_READONLY; addReply(c, shared.ok); } /* The READWRITE command just clears the READONLY command state. 
*/ void readwriteCommand(client *c) { - if (server.cluster_enabled == 0) { - addReplyError(c, "This instance has cluster support disabled"); - return; - } c->flags &= ~CLIENT_READONLY; addReply(c, shared.ok); } diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index def572c249..b913cd5671 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -1185,6 +1185,7 @@ clusterLink *createClusterLink(clusterNode *node) { * This function will just make sure that the original node associated * with this link will have the 'link' field set to NULL. */ void freeClusterLink(clusterLink *link) { + serverAssert(link != NULL); if (link->conn) { connClose(link->conn); link->conn = NULL; @@ -5815,6 +5816,10 @@ int handleDebugClusterCommand(client *c) { addReplyErrorFormat(c, "Unknown node %s", (char *)c->argv[4]->ptr); return 1; } + if (n == server.cluster->myself) { + addReplyErrorFormat(c, "Cannot free cluster link(s) to myself"); + return 1; + } /* Terminate the link based on the direction or all. 
*/ if (!strcasecmp(c->argv[3]->ptr, "from")) { diff --git a/src/commands.def b/src/commands.def index e4484529a2..ef84aa0ccb 100644 --- a/src/commands.def +++ b/src/commands.def @@ -1089,6 +1089,28 @@ struct COMMAND_ARG CLIENT_CACHING_Args[] = { {MAKE_ARG("mode",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_NONE,2,NULL),.subargs=CLIENT_CACHING_mode_Subargs}, }; +/********** CLIENT CAPA ********************/ + +#ifndef SKIP_CMD_HISTORY_TABLE +/* CLIENT CAPA history */ +#define CLIENT_CAPA_History NULL +#endif + +#ifndef SKIP_CMD_TIPS_TABLE +/* CLIENT CAPA tips */ +#define CLIENT_CAPA_Tips NULL +#endif + +#ifndef SKIP_CMD_KEY_SPECS_TABLE +/* CLIENT CAPA key specs */ +#define CLIENT_CAPA_Keyspecs NULL +#endif + +/* CLIENT CAPA argument table */ +struct COMMAND_ARG CLIENT_CAPA_Args[] = { +{MAKE_ARG("capability",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_MULTIPLE,0,NULL)}, +}; + /********** CLIENT GETNAME ********************/ #ifndef SKIP_CMD_HISTORY_TABLE @@ -1552,6 +1574,7 @@ struct COMMAND_ARG CLIENT_UNBLOCK_Args[] = { /* CLIENT command table */ struct COMMAND_STRUCT CLIENT_Subcommands[] = { {MAKE_CMD("caching","Instructs the server whether to track the keys in the next request.","O(1)","6.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_CACHING_History,0,CLIENT_CACHING_Tips,0,clientCommand,3,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_CACHING_Keyspecs,0,NULL,1),.args=CLIENT_CACHING_Args}, +{MAKE_CMD("capa","A client claims its capability.","O(1)","8.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_CAPA_History,0,CLIENT_CAPA_Tips,0,clientCommand,-3,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,CLIENT_CAPA_Keyspecs,0,NULL,1),.args=CLIENT_CAPA_Args}, {MAKE_CMD("getname","Returns the name of the 
connection.","O(1)","2.6.9",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_GETNAME_History,0,CLIENT_GETNAME_Tips,0,clientCommand,2,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_GETNAME_Keyspecs,0,NULL,0)}, {MAKE_CMD("getredir","Returns the client ID to which the connection's tracking notifications are redirected.","O(1)","6.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_GETREDIR_History,0,CLIENT_GETREDIR_Tips,0,clientCommand,2,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_GETREDIR_Keyspecs,0,NULL,0)}, {MAKE_CMD("help","Returns helpful text about the different subcommands.","O(1)","5.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_HELP_History,0,CLIENT_HELP_Tips,0,clientCommand,2,CMD_LOADING|CMD_STALE|CMD_SENTINEL,ACL_CATEGORY_CONNECTION,CLIENT_HELP_Keyspecs,0,NULL,0)}, @@ -10703,8 +10726,8 @@ struct COMMAND_STRUCT serverCommandTable[] = { /* cluster */ {MAKE_CMD("asking","Signals that a cluster client is following an -ASK redirect.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,ASKING_History,0,ASKING_Tips,0,askingCommand,1,CMD_FAST,ACL_CATEGORY_CONNECTION,ASKING_Keyspecs,0,NULL,0)}, {MAKE_CMD("cluster","A container for Cluster commands.","Depends on subcommand.","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,CLUSTER_History,0,CLUSTER_Tips,0,NULL,-2,0,0,CLUSTER_Keyspecs,0,NULL,0),.subcommands=CLUSTER_Subcommands}, -{MAKE_CMD("readonly","Enables read-only queries for a connection to a Cluster replica node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,READONLY_History,0,READONLY_Tips,0,readonlyCommand,1,CMD_FAST|CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,READONLY_Keyspecs,0,NULL,0)}, -{MAKE_CMD("readwrite","Enables read-write queries for a connection to a Reids Cluster replica 
node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,READWRITE_History,0,READWRITE_Tips,0,readwriteCommand,1,CMD_FAST|CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,READWRITE_Keyspecs,0,NULL,0)}, +{MAKE_CMD("readonly","Enables read-only queries for a connection to a Valkey replica node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,READONLY_History,0,READONLY_Tips,0,readonlyCommand,1,CMD_FAST|CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,READONLY_Keyspecs,0,NULL,0)}, +{MAKE_CMD("readwrite","Enables read-write queries for a connection to a Valkey replica node.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"cluster",COMMAND_GROUP_CLUSTER,READWRITE_History,0,READWRITE_Tips,0,readwriteCommand,1,CMD_FAST|CMD_LOADING|CMD_STALE,ACL_CATEGORY_CONNECTION,READWRITE_Keyspecs,0,NULL,0)}, /* connection */ {MAKE_CMD("auth","Authenticates the connection.","O(N) where N is the number of passwords defined for the user","1.0.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,AUTH_History,1,AUTH_Tips,0,authCommand,-2,CMD_NOSCRIPT|CMD_LOADING|CMD_STALE|CMD_FAST|CMD_NO_AUTH|CMD_SENTINEL|CMD_ALLOW_BUSY,ACL_CATEGORY_CONNECTION,AUTH_Keyspecs,0,NULL,2),.args=AUTH_Args}, {MAKE_CMD("client","A container for client connection commands.","Depends on subcommand.","2.4.0",CMD_DOC_NONE,NULL,NULL,"connection",COMMAND_GROUP_CONNECTION,CLIENT_History,0,CLIENT_Tips,0,NULL,-2,CMD_SENTINEL,0,CLIENT_Keyspecs,0,NULL,0),.subcommands=CLIENT_Subcommands}, diff --git a/src/commands/client-capa.json b/src/commands/client-capa.json new file mode 100644 index 0000000000..3c16cd44f9 --- /dev/null +++ b/src/commands/client-capa.json @@ -0,0 +1,29 @@ +{ + "CAPA": { + "summary": "A client claims its capability.", + "complexity": "O(1)", + "group": "connection", + "since": "8.0.0", + "arity": -3, + "container": "CLIENT", + "function": "clientCommand", + "command_flags": [ + "NOSCRIPT", + "LOADING", + "STALE" + ], + "acl_categories": [ + "CONNECTION" + ], + 
"reply_schema": { + "const": "OK" + }, + "arguments": [ + { + "multiple": "true", + "name": "capability", + "type": "string" + } + ] + } +} diff --git a/src/commands/readonly.json b/src/commands/readonly.json index 4478cfb797..8fe27c6d99 100644 --- a/src/commands/readonly.json +++ b/src/commands/readonly.json @@ -1,6 +1,6 @@ { "READONLY": { - "summary": "Enables read-only queries for a connection to a Cluster replica node.", + "summary": "Enables read-only queries for a connection to a Valkey replica node.", "complexity": "O(1)", "group": "cluster", "since": "3.0.0", diff --git a/src/commands/readwrite.json b/src/commands/readwrite.json index 440dd596b9..dd3762ff8c 100644 --- a/src/commands/readwrite.json +++ b/src/commands/readwrite.json @@ -1,6 +1,6 @@ { "READWRITE": { - "summary": "Enables read-write queries for a connection to a Reids Cluster replica node.", + "summary": "Enables read-write queries for a connection to a Valkey replica node.", "complexity": "O(1)", "group": "cluster", "since": "3.0.0", diff --git a/src/config.c b/src/config.c index 2a692ac8fa..1088613255 100644 --- a/src/config.c +++ b/src/config.c @@ -3100,6 +3100,7 @@ standardConfig static_configs[] = { /* SDS Configs */ createSDSConfig("primaryauth", "masterauth", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_auth, NULL, NULL, NULL), createSDSConfig("requirepass", NULL, MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.requirepass, NULL, NULL, updateRequirePass), + createSDSConfig("availability-zone", NULL, MODIFIABLE_CONFIG, 0, server.availability_zone, "", NULL, NULL), /* Enum Configs */ createEnumConfig("supervised", NULL, IMMUTABLE_CONFIG, supervised_mode_enum, server.supervised_mode, SUPERVISED_NONE, NULL, NULL), diff --git a/src/networking.c b/src/networking.c index dff4226c54..ba40db6c61 100644 --- a/src/networking.c +++ b/src/networking.c @@ -168,6 +168,7 @@ client *createClient(connection *conn) { c->bulklen = -1; c->sentlen = 0; c->flags 
= 0; + c->capa = 0; c->slot = -1; c->ctime = c->last_interaction = server.unixtime; c->duration = 0; @@ -3589,6 +3590,13 @@ NULL } else { addReplyErrorObject(c, shared.syntaxerr); } + } else if (!strcasecmp(c->argv[1]->ptr, "capa") && c->argc >= 3) { + for (int i = 2; i < c->argc; i++) { + if (!strcasecmp(c->argv[i]->ptr, "redirect")) { + c->capa |= CLIENT_CAPA_REDIRECT; + } + } + addReply(c, shared.ok); } else { addReplySubcommandSyntaxError(c); } diff --git a/src/quicklist.c b/src/quicklist.c index b56763c0fe..f381afb00c 100644 --- a/src/quicklist.c +++ b/src/quicklist.c @@ -39,10 +39,6 @@ #include "lzf.h" #include "serverassert.h" -#ifndef REDIS_STATIC -#define REDIS_STATIC static -#endif - /* Optimization levels for size-based filling. * Note that the largest possible limit is 64k, so even if each record takes * just one byte, it still won't overflow the 16 bit count field. */ @@ -100,8 +96,8 @@ quicklistBookmark *_quicklistBookmarkFindByName(quicklist *ql, const char *name) quicklistBookmark *_quicklistBookmarkFindByNode(quicklist *ql, quicklistNode *node); void _quicklistBookmarkDelete(quicklist *ql, quicklistBookmark *bm); -REDIS_STATIC quicklistNode *_quicklistSplitNode(quicklistNode *node, int offset, int after); -REDIS_STATIC quicklistNode *_quicklistMergeNodes(quicklist *quicklist, quicklistNode *center); +static quicklistNode *_quicklistSplitNode(quicklistNode *node, int offset, int after); +static quicklistNode *_quicklistMergeNodes(quicklist *quicklist, quicklistNode *center); /* Simple way to give quicklistEntry structs default values with one call. 
*/ #define initEntry(e) \ @@ -169,7 +165,7 @@ quicklist *quicklistNew(int fill, int compress) { return quicklist; } -REDIS_STATIC quicklistNode *quicklistCreateNode(void) { +static quicklistNode *quicklistCreateNode(void) { quicklistNode *node; node = zmalloc(sizeof(*node)); node->entry = NULL; @@ -213,7 +209,7 @@ void quicklistRelease(quicklist *quicklist) { /* Compress the listpack in 'node' and update encoding details. * Returns 1 if listpack compressed successfully. * Returns 0 if compression failed or if listpack too small to compress. */ -REDIS_STATIC int __quicklistCompressNode(quicklistNode *node) { +static int __quicklistCompressNode(quicklistNode *node) { #ifdef SERVER_TEST node->attempted_compress = 1; #endif @@ -253,7 +249,7 @@ REDIS_STATIC int __quicklistCompressNode(quicklistNode *node) { /* Uncompress the listpack in 'node' and update encoding details. * Returns 1 on successful decode, 0 on failure to decode. */ -REDIS_STATIC int __quicklistDecompressNode(quicklistNode *node) { +static int __quicklistDecompressNode(quicklistNode *node) { #ifdef SERVER_TEST node->attempted_compress = 0; #endif @@ -304,7 +300,7 @@ size_t quicklistGetLzf(const quicklistNode *node, void **data) { * The only way to guarantee interior nodes get compressed is to iterate * to our "interior" compress depth then compress the next node we find. * If compress depth is larger than the entire list, we return immediately. */ -REDIS_STATIC void __quicklistCompress(const quicklist *quicklist, quicklistNode *node) { +static void __quicklistCompress(const quicklist *quicklist, quicklistNode *node) { if (quicklist->len == 0) return; /* The head and tail should never be compressed (we should not attempt to recompress them) */ @@ -398,8 +394,7 @@ REDIS_STATIC void __quicklistCompress(const quicklist *quicklist, quicklistNode * Insert 'new_node' before 'old_node' if 'after' is 0. 
* Note: 'new_node' is *always* uncompressed, so if we assign it to * head or tail, we do not need to uncompress it. */ -REDIS_STATIC void -__quicklistInsertNode(quicklist *quicklist, quicklistNode *old_node, quicklistNode *new_node, int after) { +static void __quicklistInsertNode(quicklist *quicklist, quicklistNode *old_node, quicklistNode *new_node, int after) { if (after) { new_node->prev = old_node; if (old_node) { @@ -431,11 +426,11 @@ __quicklistInsertNode(quicklist *quicklist, quicklistNode *old_node, quicklistNo } /* Wrappers for node inserting around existing node. */ -REDIS_STATIC void _quicklistInsertNodeBefore(quicklist *quicklist, quicklistNode *old_node, quicklistNode *new_node) { +static void _quicklistInsertNodeBefore(quicklist *quicklist, quicklistNode *old_node, quicklistNode *new_node) { __quicklistInsertNode(quicklist, old_node, new_node, 0); } -REDIS_STATIC void _quicklistInsertNodeAfter(quicklist *quicklist, quicklistNode *old_node, quicklistNode *new_node) { +static void _quicklistInsertNodeAfter(quicklist *quicklist, quicklistNode *old_node, quicklistNode *new_node) { __quicklistInsertNode(quicklist, old_node, new_node, 1); } @@ -496,7 +491,7 @@ static int isLargeElement(size_t sz, int fill) { return sz > quicklistNodeNegFillLimit(fill); } -REDIS_STATIC int _quicklistNodeAllowInsert(const quicklistNode *node, const int fill, const size_t sz) { +static int _quicklistNodeAllowInsert(const quicklistNode *node, const int fill, const size_t sz) { if (unlikely(!node)) return 0; if (unlikely(QL_NODE_IS_PLAIN(node) || isLargeElement(sz, fill))) return 0; @@ -511,7 +506,7 @@ REDIS_STATIC int _quicklistNodeAllowInsert(const quicklistNode *node, const int return 1; } -REDIS_STATIC int _quicklistNodeAllowMerge(const quicklistNode *a, const quicklistNode *b, const int fill) { +static int _quicklistNodeAllowMerge(const quicklistNode *a, const quicklistNode *b, const int fill) { if (!a || !b) return 0; if (unlikely(QL_NODE_IS_PLAIN(a) || 
QL_NODE_IS_PLAIN(b))) return 0; @@ -640,7 +635,7 @@ void quicklistAppendPlainNode(quicklist *quicklist, unsigned char *data, size_t } \ } while (0) -REDIS_STATIC void __quicklistDelNode(quicklist *quicklist, quicklistNode *node) { +static void __quicklistDelNode(quicklist *quicklist, quicklistNode *node) { /* Update the bookmark if any */ quicklistBookmark *bm = _quicklistBookmarkFindByNode(quicklist, node); if (bm) { @@ -680,7 +675,7 @@ REDIS_STATIC void __quicklistDelNode(quicklist *quicklist, quicklistNode *node) * * Returns 1 if the entire node was deleted, 0 if node still exists. * Also updates in/out param 'p' with the next offset in the listpack. */ -REDIS_STATIC int quicklistDelIndex(quicklist *quicklist, quicklistNode *node, unsigned char **p) { +static int quicklistDelIndex(quicklist *quicklist, quicklistNode *node, unsigned char **p) { int gone = 0; if (unlikely(QL_NODE_IS_PLAIN(node))) { @@ -824,7 +819,7 @@ int quicklistReplaceAtIndex(quicklist *quicklist, long index, void *data, size_t * * Returns the input node picked to merge against or NULL if * merging was not possible. */ -REDIS_STATIC quicklistNode *_quicklistListpackMerge(quicklist *quicklist, quicklistNode *a, quicklistNode *b) { +static quicklistNode *_quicklistListpackMerge(quicklist *quicklist, quicklistNode *a, quicklistNode *b) { D("Requested merge (a,b) (%u, %u)", a->count, b->count); quicklistDecompressNode(a); @@ -864,7 +859,7 @@ REDIS_STATIC quicklistNode *_quicklistListpackMerge(quicklist *quicklist, quickl * * Returns the new 'center' after merging. 
*/ -REDIS_STATIC quicklistNode *_quicklistMergeNodes(quicklist *quicklist, quicklistNode *center) { +static quicklistNode *_quicklistMergeNodes(quicklist *quicklist, quicklistNode *center) { int fill = quicklist->fill; quicklistNode *prev, *prev_prev, *next, *next_next, *target; prev = prev_prev = next = next_next = target = NULL; @@ -926,7 +921,7 @@ REDIS_STATIC quicklistNode *_quicklistMergeNodes(quicklist *quicklist, quicklist * The input node keeps all elements not taken by the returned node. * * Returns newly created node or NULL if split not possible. */ -REDIS_STATIC quicklistNode *_quicklistSplitNode(quicklistNode *node, int offset, int after) { +static quicklistNode *_quicklistSplitNode(quicklistNode *node, int offset, int after) { size_t zl_sz = node->sz; quicklistNode *new_node = quicklistCreateNode(); @@ -962,8 +957,7 @@ REDIS_STATIC quicklistNode *_quicklistSplitNode(quicklistNode *node, int offset, * * If after==1, the new value is inserted after 'entry', otherwise * the new value is inserted before 'entry'. 
*/ -REDIS_STATIC void -_quicklistInsert(quicklistIter *iter, quicklistEntry *entry, void *value, const size_t sz, int after) { +static void _quicklistInsert(quicklistIter *iter, quicklistEntry *entry, void *value, const size_t sz, int after) { quicklist *quicklist = iter->quicklist; int full = 0, at_tail = 0, at_head = 0, avail_next = 0, avail_prev = 0; int fill = quicklist->fill; @@ -1548,7 +1542,7 @@ int quicklistPopCustom(quicklist *quicklist, } /* Return a malloc'd copy of data passed in */ -REDIS_STATIC void *_quicklistSaver(unsigned char *data, size_t sz) { +static void *_quicklistSaver(unsigned char *data, size_t sz) { unsigned char *vstr; if (data) { vstr = zmalloc(sz); diff --git a/src/sds.c b/src/sds.c index c52c14759b..1c0ddd559d 100644 --- a/src/sds.c +++ b/src/sds.c @@ -415,7 +415,7 @@ size_t sdsAllocSize(sds s) { char type = s[-1] & SDS_TYPE_MASK; /* SDS_TYPE_5 header doesn't contain the size of the allocation */ if (type == SDS_TYPE_5) { - return s_malloc_size(sdsAllocPtr(s)); + return s_malloc_usable_size(sdsAllocPtr(s)); } else { return sdsHdrSize(type) + sdsalloc(s) + 1; } diff --git a/src/sdsalloc.h b/src/sdsalloc.h index 6fd076d9f0..dfa8257ebd 100644 --- a/src/sdsalloc.h +++ b/src/sdsalloc.h @@ -51,5 +51,6 @@ #define s_trymalloc_usable ztrymalloc_usable #define s_tryrealloc_usable ztryrealloc_usable #define s_malloc_size zmalloc_size +#define s_malloc_usable_size zmalloc_usable_size #endif diff --git a/src/server.c b/src/server.c index fe522b3e5d..ee1bcd088f 100644 --- a/src/server.c +++ b/src/server.c @@ -3867,6 +3867,12 @@ int processCommand(client *c) { } } + if (!server.cluster_enabled && c->capa & CLIENT_CAPA_REDIRECT && server.primary_host && !mustObeyClient(c) && + (is_write_command || (is_read_command && !(c->flags & CLIENT_READONLY)))) { + addReplyErrorSds(c, sdscatprintf(sdsempty(), "-REDIRECT %s:%d", server.primary_host, server.primary_port)); + return C_OK; + } + /* Disconnect some clients if total clients memory is too high. 
We do this * before key eviction, after the last command was executed and consumed * some client output buffer memory. */ @@ -5381,7 +5387,8 @@ sds genValkeyInfoString(dict *section_dict, int all_sections, int everything) { "lru_clock:%u\r\n", server.lruclock, "executable:%s\r\n", server.executable ? server.executable : "", "config_file:%s\r\n", server.configfile ? server.configfile : "", - "io_threads_active:%i\r\n", server.io_threads_active)); + "io_threads_active:%i\r\n", server.io_threads_active, + "availability_zone:%s\r\n", server.availability_zone)); /* clang-format on */ /* Conditional properties */ diff --git a/src/server.h b/src/server.h index a12f091ba9..90efc6aa9c 100644 --- a/src/server.h +++ b/src/server.h @@ -429,6 +429,9 @@ extern int configOOMScoreAdjValuesDefaults[CONFIG_OOM_COUNT]; #define CLIENT_REPLICATION_DONE (1ULL << 51) /* Indicate that replication has been done on the client */ #define CLIENT_AUTHENTICATED (1ULL << 52) /* Indicate a client has successfully authenticated */ +/* Client capabilities */ +#define CLIENT_CAPA_REDIRECT (1 << 0) /* Indicate that the client can handle redirection */ + /* Client block type (btype field in client structure) * if CLIENT_BLOCKED flag is set. */ typedef enum blocking_type { @@ -1205,6 +1208,7 @@ typedef struct client { uint64_t flags; /* Client flags: CLIENT_* macros. */ connection *conn; int resp; /* RESP protocol version. Can be 2 or 3. */ + uint32_t capa; /* Client capabilities: CLIENT_CAPA* macros. */ serverDb *db; /* Pointer to currently SELECTed DB. */ robj *name; /* As set by CLIENT SETNAME. */ robj *lib_name; /* The client library name as set by CLIENT SETINFO. */ @@ -2121,6 +2125,7 @@ struct valkeyServer { is down, doesn't affect pubsub global. 
*/ long reply_buffer_peak_reset_time; /* The amount of time (in milliseconds) to wait between reply buffer peak resets */ int reply_buffer_resizing_enabled; /* Is reply buffer resizing enabled (1 by default) */ + sds availability_zone; /* When run in a cloud environment we can configure the availability zone it is running in */ /* Local environment */ char *locale_collate; }; diff --git a/tests/README.md b/tests/README.md index 9d9c657760..efe936aa5b 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,6 +1,18 @@ Valkey Test Suite ================= +Overview +-------- + +Integration tests are written in Tcl, a high-level, general-purpose, interpreted, dynamic programming language [[source](https://wiki.tcl-lang.org/page/What+is+Tcl)]. +`runtest` is the main entry point for running integration tests. +For example, to run a single test: + +```shell +./runtest --single unit/your_test_name +# For additional arguments, you may refer to the `runtest` script itself. +``` + The normal execution mode of the test suite involves starting and manipulating local `valkey-server` instances, inspecting process state, log files, etc. @@ -19,6 +31,26 @@ match different external server configurations: | `--cluster-mode` | Run in strict Valkey Cluster compatibility mode. | | `--large-memory` | Enables tests that consume more than 100mb | +Debugging +--------- + +You can set a breakpoint and invoke a minimal debugger using the `bp` function. + +``` +... your test code before break-point +bp 1 +... your test code after break-point +``` + +The `bp 1` will give back the tcl interpreter to the developer, and allow you to interactively print local variables (through `puts`), run functions and so forth [[source](https://wiki.tcl-lang.org/page/A+minimal+debugger)]. +`bp` takes a single argument, which is `1` for the case above, and is used to label a breakpoint with a string. +Labels are printed out when breakpoints are hit, so you can identify which breakpoint was triggered. 
+Breakpoints can be skipped by setting the global variable `::bp_skip`, and by providing the labels you want to skip. + +The minimal debugger comes with the following predefined functions. +* Press `c` to continue past the breakpoint. +* Press `i` to print local variables. + Tags ---- diff --git a/tests/integration/replica-redirect.tcl b/tests/integration/replica-redirect.tcl new file mode 100644 index 0000000000..0db51dd3ff --- /dev/null +++ b/tests/integration/replica-redirect.tcl @@ -0,0 +1,36 @@ +start_server {tags {needs:repl external:skip}} { + start_server {} { + set primary_host [srv -1 host] + set primary_port [srv -1 port] + + r replicaof $primary_host $primary_port + wait_for_condition 50 100 { + [s 0 master_link_status] eq {up} + } else { + fail "Replicas not replicating from primary" + } + + test {replica allow read command by default} { + r get foo + } {} + + test {replica reply READONLY error for write command by default} { + assert_error {READONLY*} {r set foo bar} + } + + test {replica redirect read and write command after CLIENT CAPA REDIRECT} { + r client capa redirect + assert_error "REDIRECT $primary_host:$primary_port" {r set foo bar} + assert_error "REDIRECT $primary_host:$primary_port" {r get foo} + } + + test {non-data access commands are not redirected} { + r ping + } {PONG} + + test {replica allow read command in READONLY mode} { + r readonly + r get foo + } {} + } +} diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 9d69e44232..c6c405b191 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -1162,3 +1162,25 @@ proc generate_largevalue_test_array {} { set largevalue(quicklist) [string repeat "x" 8192] return [array get largevalue] } + +# Breakpoint function, which invokes a minimal debugger. +# This function can be placed within the desired Tcl tests for debugging purposes. +# +# Arguments: +# * 's': breakpoint label, which is printed when breakpoints are hit for unique identification. 
+# +# Source: https://wiki.tcl-lang.org/page/A+minimal+debugger +proc bp {{s {}}} { + if ![info exists ::bp_skip] { + set ::bp_skip [list] + } elseif {[lsearch -exact $::bp_skip $s]>=0} return + if [catch {info level -1} who] {set who ::} + while 1 { + puts -nonewline "$who/$s> "; flush stdout + gets stdin line + if {$line=="c"} {puts "continuing.."; break} + if {$line=="i"} {set line "info locals"} + catch {uplevel 1 $line} res + puts $res + } +} diff --git a/utils/create-cluster/create-cluster b/utils/create-cluster/create-cluster index f4f44871fc..cfad7fbf9d 100755 --- a/utils/create-cluster/create-cluster +++ b/utils/create-cluster/create-cluster @@ -28,7 +28,7 @@ then while [ $((PORT < ENDPORT)) != "0" ]; do PORT=$((PORT+1)) echo "Starting $PORT" - $BIN_PATH/valkey-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes ${ADDITIONAL_OPTIONS} + $BIN_PATH/valkey-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS} done exit 0 fi @@ -70,7 +70,7 @@ then while [ $((PORT < ENDPORT)) != "0" ]; do PORT=$((PORT+1)) echo "Starting $PORT" - $BIN_PATH/valkey-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes 
${ADDITIONAL_OPTIONS} + $BIN_PATH/valkey-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes ${ADDITIONAL_OPTIONS} done exit 0 fi diff --git a/valkey.conf b/valkey.conf index 05301d1bee..e4ffd0f8ad 100644 --- a/valkey.conf +++ b/valkey.conf @@ -2319,3 +2319,9 @@ jemalloc-bg-thread yes # to suppress # # ignore-warnings ARM64-COW-BUG + +# Inform Valkey of the availability zone if running in a cloud environment. Currently +# this is only exposed via the info command for clients to use, but in the future +# we may also use this when making decisions for replication. +# +# availability-zone "zone-name" \ No newline at end of file