From 5b1fd222ed97430dfeef27a95aeb7e1fb8fd1352 Mon Sep 17 00:00:00 2001 From: Madelyn Olson Date: Thu, 2 May 2024 20:00:04 -0700 Subject: [PATCH 01/10] An initial simple unit test framework (#344) The core idea was to take a lot of the stuff from the C unity framework and adapt it a bit here. Each file in the `unit` directory that starts with `test_` is automatically assumed to be a test suite. Within each file, all functions that start with `test_` are assumed to be a test. See unit/README.md for details about the implementation. Instead of compiling basically a net new binary, the way the tests are compiled is that the main valkey server is compiled as a static archive, which we then compile the individual test files against to create a new test executable. This is not all that important now, other than it makes the compilation simpler, but what it will allow us to do is overwrite functions in the archive to enable mocking for cross compilation unit functions. There are also ways to enable mocking from within the same compilation unit, but I don't know how important this is. Tests are also written in one of two styles: 1. Including the header file and directly calling functions from the archive. 2. Importing the original file, and then calling the functions. This second approach is cool because we can call static functions. It won't mess up the archive either. --------- Signed-off-by: Madelyn Olson --- .github/workflows/ci.yml | 5 +- .github/workflows/daily.yml | 31 +++- .gitignore | 2 + src/Makefile | 41 +++++- src/crc64.c | 223 ---------------------------- src/crc64.h | 4 - src/intset.c | 217 --------------------------- src/server.c | 2 - src/unit/README.md | 54 +++++++ src/unit/test_crc64.c | 30 ++++ src/unit/test_crc64combine.c | 193 ++++++++++++++++++++++++ src/unit/test_files.h | 31 ++++ src/unit/test_help.h | 48 ++++++ src/unit/test_intset.c | 229 +++++++++++++++++++++++++++++ src/unit/test_main.c | 67 +++++++++ utils/generate-unit-test-header.py | 59 ++++++++ 16 files changed, 779 insertions(+), 457 deletions(-) create mode 100644 src/unit/README.md create mode 100644 src/unit/test_crc64.c create mode 100644 src/unit/test_crc64combine.c create mode 100644 src/unit/test_files.h create mode 100644 src/unit/test_help.h create mode 100644 src/unit/test_intset.c create mode 100644 src/unit/test_main.c create mode 100755 utils/generate-unit-test-header.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 137bd195c7..c94893320b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: - name: make # Fail build if there are warnings # build with TLS just for compilation coverage - run: make SERVER_CFLAGS='-Werror' BUILD_TLS=yes + run: make all-with-unit-tests SERVER_CFLAGS='-Werror' BUILD_TLS=yes - name: test run: | sudo apt-get install tcl8.6 tclx @@ -27,6 +27,9 @@ jobs: make commands.def dirty=$(git diff) if [[ ! 
-z $dirty ]]; then echo $dirty; exit 1; fi + - name: unit tests + run: | + ./src/valkey-unit-tests test-sanitizer-address: runs-on: ubuntu-latest diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 178d9b7ceb..f4726a420e 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -54,7 +54,7 @@ jobs: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} - name: make - run: make SERVER_CFLAGS='-Werror -DSERVER_TEST' + run: make all-with-unit-tests SERVER_CFLAGS='-Werror -DSERVER_TEST' - name: testprep run: sudo apt-get install tcl8.6 tclx - name: test @@ -69,9 +69,12 @@ jobs: - name: cluster tests if: true && !contains(github.event.inputs.skiptests, 'cluster') run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - - name: unittest + - name: legacy unit tests if: true && !contains(github.event.inputs.skiptests, 'unittest') run: ./src/valkey-server test all --accurate + - name: new unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-unit-tests --accurate test-ubuntu-jemalloc-fortify: runs-on: ubuntu-latest @@ -113,9 +116,12 @@ jobs: - name: cluster tests if: true && !contains(github.event.inputs.skiptests, 'cluster') run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - - name: unittest + - name: legacy unit tests if: true && !contains(github.event.inputs.skiptests, 'unittest') run: ./src/valkey-server test all --accurate + - name: new unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: make valkey-unit-tests && ./src/valkey-unit-tests --accurate test-ubuntu-libc-malloc: runs-on: ubuntu-latest @@ -231,9 +237,12 @@ jobs: - name: cluster tests if: true && !contains(github.event.inputs.skiptests, 'cluster') run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - - name: unittest + - name: legacy unit tests if: true && !contains(github.event.inputs.skiptests, 'unittest') run: ./src/valkey-server test all --accurate + - name: new unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-unit-tests --accurate test-ubuntu-tls: runs-on: ubuntu-latest @@ -589,7 +598,7 @@ jobs: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} - name: make - run: make SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror -DDEBUG_ASSERTIONS' + run: make all-with-unit-tests SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror -DDEBUG_ASSERTIONS' - name: testprep # Work around ASAN issue, see https://github.com/google/sanitizers/issues/1716 run: | @@ -608,9 +617,12 @@ jobs: - name: cluster tests if: true && !contains(github.event.inputs.skiptests, 'cluster') run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - - name: unittest + - name: legacy unit tests if: true && !contains(github.event.inputs.skiptests, 'unittest') run: ./src/valkey-server test all + - name: new unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-unit-tests test-sanitizer-undefined: runs-on: ubuntu-latest @@ -638,7 +650,7 @@ jobs: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} - name: make - run: make SANITIZER=undefined SERVER_CFLAGS='-DSERVER_TEST -Werror' LUA_DEBUG=yes # we (ab)use this flow to also check Lua C API violations + run: make all-with-unit-tests SANITIZER=undefined SERVER_CFLAGS='-DSERVER_TEST -Werror' LUA_DEBUG=yes # we (ab)use this flow to also check Lua C API violations - name: testprep run: | sudo apt-get 
update @@ -655,9 +667,12 @@ jobs: - name: cluster tests if: true && !contains(github.event.inputs.skiptests, 'cluster') run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} - - name: unittest + - name: legacy unit tests if: true && !contains(github.event.inputs.skiptests, 'unittest') run: ./src/valkey-server test all --accurate + - name: new unit tests + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: ./src/valkey-unit-tests --accurate test-centos7-jemalloc: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 920e32eca7..8ed98aa326 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ .*.swp *.o +*.a *.xo *.so *.d @@ -12,6 +13,7 @@ dump.rdb *-cli *-sentinel *-server +*-unit-tests doc-tools release misc/* diff --git a/src/Makefile b/src/Makefile index 833349839f..908aa695de 100644 --- a/src/Makefile +++ b/src/Makefile @@ -98,6 +98,15 @@ ifeq ($(USE_JEMALLOC),no) MALLOC=libc endif +# Some unit tests compile files a second time to get access to static functions, the "--allow-multiple-definition" flag +# allows us to do that without an error, by using the first instance of function. This behavior can also be used +# to tweak behavior of code just for unit tests. The version of ld on MacOS apparently always does this. +ifneq ($(uname_S),Darwin) + ALLOW_DUPLICATE_FLAG=-Wl,--allow-multiple-definition +else + ALLOW_DUPLICATE_FLAG= +endif + ifdef SANITIZER ifeq ($(SANITIZER),address) MALLOC=libc @@ -357,6 +366,7 @@ else endif SERVER_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS) +SERVER_AR=$(QUIET_AR)$(AR) SERVER_LD=$(QUIET_LINK)$(CC) $(FINAL_LDFLAGS) ENGINE_INSTALL=$(QUIET_INSTALL)$(INSTALL) @@ -372,6 +382,7 @@ QUIET_CC = @printf ' %b %b\n' $(CCCOLOR)CC$(ENDCOLOR) $(SRCCOLOR)$@$(ENDCOLOR QUIET_GEN = @printf ' %b %b\n' $(CCCOLOR)GEN$(ENDCOLOR) $(SRCCOLOR)$@$(ENDCOLOR) 1>&2; QUIET_LINK = @printf ' %b %b\n' $(LINKCOLOR)LINK$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) 1>&2; QUIET_INSTALL = @printf ' %b %b\n' $(LINKCOLOR)INSTALL$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) 1>&2; +QUIET_AR = @printf ' %b %b\n' $(CCCOLOR)ARCHIVE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR) 1>&2; endif ifneq (, $(findstring LOG_REQ_RES, $(SERVER_CFLAGS))) @@ -392,6 +403,10 @@ ENGINE_BENCHMARK_NAME=$(ENGINE_NAME)-benchmark$(PROG_SUFFIX) ENGINE_BENCHMARK_OBJ=ae.o anet.o valkey-benchmark.o adlist.o dict.o zmalloc.o serverassert.o release.o crcspeed.o crccombine.o crc64.o siphash.o crc16.o monotonic.o cli_common.o mt19937-64.o strl.o ENGINE_CHECK_RDB_NAME=$(ENGINE_NAME)-check-rdb$(PROG_SUFFIX) ENGINE_CHECK_AOF_NAME=$(ENGINE_NAME)-check-aof$(PROG_SUFFIX) +ENGINE_LIB_NAME=lib$(ENGINE_NAME).a +ENGINE_TEST_FILES:=$(wildcard unit/*.c) +ENGINE_TEST_OBJ:=$(sort $(patsubst unit/%.c,unit/%.o,$(ENGINE_TEST_FILES))) +ENGINE_UNIT_TESTS:=$(ENGINE_NAME)-unit-tests$(PROG_SUFFIX) ALL_SOURCES=$(sort $(patsubst %.o,%.c,$(ENGINE_SERVER_OBJ) $(ENGINE_CLI_OBJ) $(ENGINE_BENCHMARK_OBJ))) all: $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) $(TLS_MODULE) @@ -408,6 +423,9 @@ endif .PHONY: all +all-with-unit-tests: all $(ENGINE_UNIT_TESTS) +.PHONY: all + persist-settings: distclean echo STD=$(STD) >> .make-settings echo WARN=$(WARN) >> .make-settings @@ -442,6 +460,14 @@ endif $(SERVER_NAME): $(ENGINE_SERVER_OBJ) $(SERVER_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a ../deps/hdr_histogram/libhdrhistogram.a ../deps/fpconv/libfpconv.a $(FINAL_LIBS) +# Valkey static library, used to compile against for unit testing 
+$(ENGINE_LIB_NAME): $(ENGINE_SERVER_OBJ) + $(SERVER_AR) rcs $@ $^ + +# valkey-unit-tests +$(ENGINE_UNIT_TESTS): $(ENGINE_TEST_OBJ) $(ENGINE_LIB_NAME) + $(SERVER_LD) $(ALLOW_DUPLICATE_FLAG) -o $@ $^ ../deps/fpconv/libfpconv.a $(FINAL_LIBS) + # valkey-sentinel $(ENGINE_SENTINEL_NAME): $(SERVER_NAME) $(ENGINE_INSTALL) $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) @@ -475,6 +501,9 @@ DEP = $(ENGINE_SERVER_OBJ:%.o=%.d) $(ENGINE_CLI_OBJ:%.o=%.d) $(ENGINE_BENCHMARK_ %.o: %.c .make-prerequisites $(SERVER_CC) -MMD -o $@ -c $< +unit/%.o: unit/%.c .make-prerequisites + $(SERVER_CC) -MMD -o $@ -c $< + # The following files are checked in and don't normally need to be rebuilt. They # are built only if python is available and their prereqs are modified. ifneq (,$(PYTHON)) @@ -485,12 +514,17 @@ fmtargs.h: ../utils/generate-fmtargs.py $(QUITE_GEN)sed '/Everything below this line/,$$d' $@ > $@.tmp $(QUITE_GEN)$(PYTHON) ../utils/generate-fmtargs.py >> $@.tmp $(QUITE_GEN)mv $@.tmp $@ + +unit/test_files.h: unit/*.c ../utils/generate-unit-test-header.py + $(QUIET_GEN)$(PYTHON) ../utils/generate-unit-test-header.py + +unit/test_main.o: unit/test_files.h endif commands.c: $(COMMANDS_DEF_FILENAME).def clean: - rm -rf $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov valkey.info lcov-html Makefile.dep *.so + rm -rf $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) $(ENGINE_UNIT_TESTS) $(ENGINE_LIB_NAME) unit/*.o unit/*.d *.o *.gcda *.gcno *.gcov valkey.info lcov-html Makefile.dep *.so rm -f $(DEP) .PHONY: clean @@ -506,6 +540,9 @@ distclean: clean test: $(SERVER_NAME) $(ENGINE_CHECK_AOF_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) @(cd ..; ./runtest) +test-unit: $(ENGINE_UNIT_TESTS) + ./$(ENGINE_UNIT_TESTS) + test-modules: $(SERVER_NAME) @(cd ..; ./runtest-moduleapi) @@ -533,7 +570,7 @@ bench: $(ENGINE_BENCHMARK_NAME) @echo "" @echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386" @echo "" - $(MAKE) CFLAGS="-m32" LDFLAGS="-m32" + $(MAKE) all-with-unit-tests CFLAGS="-m32" LDFLAGS="-m32" gcov: $(MAKE) SERVER_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" SERVER_LDFLAGS="-fprofile-arcs -ftest-coverage" diff --git a/src/crc64.c b/src/crc64.c index 9d4e98ee70..97b28250a5 100644 --- a/src/crc64.c +++ b/src/crc64.c @@ -141,226 +141,3 @@ void crc64_init(void) { uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l) { return crcspeed64native(crc64_table, crc, (void *) s, l); } - -/* Test main */ -#ifdef SERVER_TEST -#include - -static void genBenchmarkRandomData(char *data, int count); -static int bench_crc64(unsigned char *data, uint64_t size, long long passes, uint64_t check, char *name, int csv); -static void bench_combine(char *label, uint64_t size, uint64_t expect, int csv); -long long _ustime(void); - -#include -#include -#include -#include -#include -#include - -#include "zmalloc.h" -#include "crccombine.h" - -long long _ustime(void) { - struct timeval tv; - long long ust; - - gettimeofday(&tv, NULL); - ust = ((long long)tv.tv_sec)*1000000; - ust += tv.tv_usec; - return ust; -} - -static int bench_crc64(unsigned char *data, uint64_t size, long long passes, uint64_t check, char *name, int csv) { - uint64_t min = size, hash; - long long original_start = _ustime(), original_end; - for (long long i=passes; i > 0; i--) { - hash = crc64(0, data, size); - } - original_end = _ustime(); - 
min = (original_end - original_start) * 1000 / passes; - /* approximate nanoseconds without nstime */ - if (csv) { - printf("%s,%" PRIu64 ",%" PRIu64 ",%d\n", - name, size, (1000 * size) / min, hash == check); - } else { - printf("test size=%" PRIu64 " algorithm=%s %" PRIu64 " M/sec matches=%d\n", - size, name, (1000 * size) / min, hash == check); - } - return hash != check; -} - -const uint64_t BENCH_RPOLY = UINT64_C(0x95ac9329ac4bc9b5); - -static void bench_combine(char *label, uint64_t size, uint64_t expect, int csv) { - uint64_t min = size, start = expect, thash = expect ^ (expect >> 17); - long long original_start = _ustime(), original_end; - for (int i=0; i < 1000; i++) { - crc64_combine(thash, start, size, BENCH_RPOLY, 64); - } - original_end = _ustime(); - /* ran 1000 times, want ns per, counted us per 1000 ... */ - min = original_end - original_start; - if (csv) { - printf("%s,%" PRIu64 ",%" PRIu64 "\n", label, size, min); - } else { - printf("%s size=%" PRIu64 " in %" PRIu64 " nsec\n", label, size, min); - } -} - -static void genBenchmarkRandomData(char *data, int count) { - static uint32_t state = 1234; - int i = 0; - - while (count--) { - state = (state*1103515245+12345); - data[i++] = '0'+((state>>16)&63); - } -} - -#define UNUSED(x) (void)(x) -int crc64Test(int argc, char *argv[], int flags) { - UNUSED(flags); - - uint64_t crc64_test_size = 0; - int i, lastarg, csv = 0, loop = 0, combine = 0; -again: - for (i = 3; i < argc; i++) { - lastarg = (i == (argc-1)); - if (!strcmp(argv[i],"--help")) { - goto usage; - } else if (!strcmp(argv[i],"--csv")) { - csv = 1; - } else if (!strcmp(argv[i],"-l")) { - loop = 1; - } else if (!strcmp(argv[i],"--crc")) { - if (lastarg) goto invalid; - crc64_test_size = atoll(argv[++i]); - } else if (!strcmp(argv[i],"--combine")) { - combine = 1; - } else { -invalid: - printf("Invalid option \"%s\" or option argument missing\n\n",argv[i]); -usage: - printf( -"Usage: crc64 [OPTIONS]\n\n" -" --csv Output in CSV format\n" -" -l Loop. Run the tests forever\n" -" --crc Benchmark crc64 faster options, using a buffer this big, and quit when done.\n" -" --combine Benchmark crc64 combine value ranges and timings.\n" - ); - return 1; - } - } - - if (crc64_test_size == 0 && combine == 0) { - crc64_init(); - printf("[calcula]: e9c6d914c4b8d9ca == %016" PRIx64 "\n", - (uint64_t)_crc64(0, "123456789", 9)); - printf("[64speed]: e9c6d914c4b8d9ca == %016" PRIx64 "\n", - (uint64_t)crc64(0, (unsigned char*)"123456789", 9)); - char li[] = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed " - "do eiusmod tempor incididunt ut labore et dolore magna " - "aliqua. Ut enim ad minim veniam, quis nostrud exercitation " - "ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis " - "aute irure dolor in reprehenderit in voluptate velit esse " - "cillum dolore eu fugiat nulla pariatur. 
Excepteur sint " - "occaecat cupidatat non proident, sunt in culpa qui officia " - "deserunt mollit anim id est laborum."; - printf("[calcula]: c7794709e69683b3 == %016" PRIx64 "\n", - (uint64_t)_crc64(0, li, sizeof(li))); - printf("[64speed]: c7794709e69683b3 == %016" PRIx64 "\n", - (uint64_t)crc64(0, (unsigned char*)li, sizeof(li))); - return 0; - - } - - int init_this_loop = 1; - long long init_start, init_end; - - do { - unsigned char* data = NULL; - uint64_t passes = 0; - if (crc64_test_size) { - data = zmalloc(crc64_test_size); - genBenchmarkRandomData((char*)data, crc64_test_size); - /* We want to hash about 1 gig of data in total, looped, to get a good - * idea of our performance. - */ - passes = (UINT64_C(0x100000000) / crc64_test_size); - passes = passes >= 2 ? passes : 2; - passes = passes <= 1000 ? passes : 1000; - } - - crc64_init(); - /* warm up the cache */ - set_crc64_cutoffs(crc64_test_size+1, crc64_test_size+1); - uint64_t expect = crc64(0, data, crc64_test_size); - - if (!combine && crc64_test_size) { - if (csv && init_this_loop) printf("algorithm,buffer,performance,crc64_matches\n"); - - /* get the single-character version for single-byte Redis behavior */ - set_crc64_cutoffs(0, crc64_test_size+1); - if (bench_crc64(data, crc64_test_size, passes, expect, "crc_1byte", csv)) return 1; - - set_crc64_cutoffs(crc64_test_size+1, crc64_test_size+1); - /* run with 8-byte "single" path, crcfaster */ - if (bench_crc64(data, crc64_test_size, passes, expect, "crcspeed", csv)) return 1; - - /* run with dual 8-byte paths */ - set_crc64_cutoffs(1, crc64_test_size+1); - if (bench_crc64(data, crc64_test_size, passes, expect, "crcdual", csv)) return 1; - - /* run with tri 8-byte paths */ - set_crc64_cutoffs(1, 1); - if (bench_crc64(data, crc64_test_size, passes, expect, "crctri", csv)) return 1; - - /* Be free memory region, be free. 
*/ - zfree(data); - data = NULL; - } - - uint64_t INIT_SIZE = UINT64_C(0xffffffffffffffff); - if (combine) { - if (init_this_loop) { - init_start = _ustime(); - crc64_combine( - UINT64_C(0xdeadbeefdeadbeef), - UINT64_C(0xfeebdaedfeebdaed), - INIT_SIZE, - BENCH_RPOLY, 64); - init_end = _ustime(); - - init_end -= init_start; - init_end *= 1000; - if (csv) { - printf("operation,size,nanoseconds\n"); - printf("init_64,%" PRIu64 ",%" PRIu64 "\n", INIT_SIZE, (uint64_t)init_end); - } else { - printf("init_64 size=%" PRIu64 " in %" PRIu64 " nsec\n", INIT_SIZE, (uint64_t)init_end); - } - /* use the hash itself as the size (unpredictable) */ - bench_combine("hash_as_size_combine", crc64_test_size, expect, csv); - - /* let's do something big (predictable, so fast) */ - bench_combine("largest_combine", INIT_SIZE, expect, csv); - } - bench_combine("combine", crc64_test_size, expect, csv); - } - init_this_loop = 0; - /* step down by ~1.641 for a range of test sizes */ - crc64_test_size -= (crc64_test_size >> 2) + (crc64_test_size >> 3) + (crc64_test_size >> 6); - } while (crc64_test_size > 3); - if (loop) goto again; - return 0; -} -# endif - - -#ifdef SERVER_TEST_MAIN -int main(int argc, char *argv[]) { - return crc64Test(argc, argv); -} - -#endif diff --git a/src/crc64.h b/src/crc64.h index 3debc3295a..34015655c1 100644 --- a/src/crc64.h +++ b/src/crc64.h @@ -6,8 +6,4 @@ void crc64_init(void); uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l); -#ifdef SERVER_TEST -int crc64Test(int argc, char *argv[], int flags); -#endif - #endif diff --git a/src/intset.c b/src/intset.c index fea4173b43..058d0bfb30 100644 --- a/src/intset.c +++ b/src/intset.c @@ -341,220 +341,3 @@ int intsetValidateIntegrity(const unsigned char *p, size_t size, int deep) { return 1; } - -#ifdef SERVER_TEST -#include -#include - -#if 0 -static void intsetRepr(intset *is) { - for (uint32_t i = 0; i < intrev32ifbe(is->length); i++) { - printf("%lld\n", (uint64_t)_intsetGet(is,i)); - } - printf("\n"); -} - -static void error(char *err) { - printf("%s\n", err); - exit(1); -} -#endif - -static void ok(void) { - printf("OK\n"); -} - -static long long usec(void) { - struct timeval tv; - gettimeofday(&tv,NULL); - return (((long long)tv.tv_sec)*1000000)+tv.tv_usec; -} - -static intset *createSet(int bits, int size) { - uint64_t mask = (1< 32) { - value = (rand()*rand()) & mask; - } else { - value = rand() & mask; - } - is = intsetAdd(is,value,NULL); - } - return is; -} - -static void checkConsistency(intset *is) { - for (uint32_t i = 0; i < (intrev32ifbe(is->length)-1); i++) { - uint32_t encoding = intrev32ifbe(is->encoding); - - if (encoding == INTSET_ENC_INT16) { - int16_t *i16 = (int16_t*)is->contents; - assert(i16[i] < i16[i+1]); - } else if (encoding == INTSET_ENC_INT32) { - int32_t *i32 = (int32_t*)is->contents; - assert(i32[i] < i32[i+1]); - } else { - int64_t *i64 = (int64_t*)is->contents; - assert(i64[i] < i64[i+1]); - } - } -} - -#define UNUSED(x) (void)(x) -int intsetTest(int argc, char **argv, int flags) { - uint8_t success; - int i; - intset *is; - srand(time(NULL)); - - UNUSED(argc); - UNUSED(argv); - UNUSED(flags); - - printf("Value encodings: "); { - assert(_intsetValueEncoding(-32768) == INTSET_ENC_INT16); - assert(_intsetValueEncoding(+32767) == INTSET_ENC_INT16); - assert(_intsetValueEncoding(-32769) == INTSET_ENC_INT32); - assert(_intsetValueEncoding(+32768) == INTSET_ENC_INT32); - assert(_intsetValueEncoding(-2147483648) == INTSET_ENC_INT32); - assert(_intsetValueEncoding(+2147483647) == INTSET_ENC_INT32); - 
assert(_intsetValueEncoding(-2147483649) == INTSET_ENC_INT64); - assert(_intsetValueEncoding(+2147483648) == INTSET_ENC_INT64); - assert(_intsetValueEncoding(-9223372036854775808ull) == - INTSET_ENC_INT64); - assert(_intsetValueEncoding(+9223372036854775807ull) == - INTSET_ENC_INT64); - ok(); - } - - printf("Basic adding: "); { - is = intsetNew(); - is = intsetAdd(is,5,&success); assert(success); - is = intsetAdd(is,6,&success); assert(success); - is = intsetAdd(is,4,&success); assert(success); - is = intsetAdd(is,4,&success); assert(!success); - assert(6 == intsetMax(is)); - assert(4 == intsetMin(is)); - ok(); - zfree(is); - } - - printf("Large number of random adds: "); { - uint32_t inserts = 0; - is = intsetNew(); - for (i = 0; i < 1024; i++) { - is = intsetAdd(is,rand()%0x800,&success); - if (success) inserts++; - } - assert(intrev32ifbe(is->length) == inserts); - checkConsistency(is); - ok(); - zfree(is); - } - - printf("Upgrade from int16 to int32: "); { - is = intsetNew(); - is = intsetAdd(is,32,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); - is = intsetAdd(is,65535,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); - assert(intsetFind(is,32)); - assert(intsetFind(is,65535)); - checkConsistency(is); - zfree(is); - - is = intsetNew(); - is = intsetAdd(is,32,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); - is = intsetAdd(is,-65535,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); - assert(intsetFind(is,32)); - assert(intsetFind(is,-65535)); - checkConsistency(is); - ok(); - zfree(is); - } - - printf("Upgrade from int16 to int64: "); { - is = intsetNew(); - is = intsetAdd(is,32,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); - is = intsetAdd(is,4294967295,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); - assert(intsetFind(is,32)); - assert(intsetFind(is,4294967295)); - checkConsistency(is); - zfree(is); - - is = intsetNew(); - is = intsetAdd(is,32,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); - is = intsetAdd(is,-4294967295,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); - assert(intsetFind(is,32)); - assert(intsetFind(is,-4294967295)); - checkConsistency(is); - ok(); - zfree(is); - } - - printf("Upgrade from int32 to int64: "); { - is = intsetNew(); - is = intsetAdd(is,65535,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); - is = intsetAdd(is,4294967295,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); - assert(intsetFind(is,65535)); - assert(intsetFind(is,4294967295)); - checkConsistency(is); - zfree(is); - - is = intsetNew(); - is = intsetAdd(is,65535,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); - is = intsetAdd(is,-4294967295,NULL); - assert(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); - assert(intsetFind(is,65535)); - assert(intsetFind(is,-4294967295)); - checkConsistency(is); - ok(); - zfree(is); - } - - printf("Stress lookups: "); { - long num = 100000, size = 10000; - int i, bits = 20; - long long start; - is = createSet(bits,size); - checkConsistency(is); - - start = usec(); - for (i = 0; i < num; i++) intsetSearch(is,rand() % ((1<(int argc, char *argv[], int flags) {`, where test_name is the name of the test. +The test name must be globally unique. +A test will be marked as successful if returns 0, and will be marked failed in all other cases. + +The test framework also parses several flags passed in, and sets them based on the arguments to the tests. 
+ +Tests flags: +* UNIT_TEST_ACCURATE: Corresponds to the --accurate flag. This flag indicates the test should use extra computation to more accurately validate the tests. +* UNIT_TEST_LARGE_MEMORY: Corresponds to the --large-memory flag. This flag indicates whether or not tests should use more than 100mb of memory. +* UNIT_TEST_SINGLE: Corresponds to the --single flag. This flag indicates that a single test is being executed. + +Tests are allowed to be passed in additional arbitrary argv/argc, which they can access from the argc and argv arguments of the test. + +## Assertions + +There are a few built in assertions that can be used, that will automatically return from the current function and return the correct error code. +Assertions are also useful as they will print out the line number that they failed on. + +* `TEST_ASSERT(condition)`: Will evaluate the condition, and if it fails it will return 1 and print out the condition that failed. +* `TEST_ASSERT_MESSAGE(message, condition`): Will evaluate the condition, and if it fails it will return 1 and print out the provided message. + +## Other utilities + +If you would like to print out additional data, use the `TEST_PRINT_INFO(info, ...)` option, which has arguments similar to printf. +This macro will also print out the function the code was executed from in addition to the line it was printed from. + +## Example test + +``` +int test_example(int argc, char *argv[], int flags) { + TEST_ASSERT(5 == 5); + TEST_ASSERT_MESSAGE("This should pass", 6 == 6); + return 0; +} +``` + +## Running tests +Tests can be run by executing: + +``` +make valkey-unit-tests +./valkey-unit-tests +``` + +Running a single unit test file +``` +./valkey-unit-tests --single crc64.c +``` + +Will just run the crc64.c file. \ No newline at end of file diff --git a/src/unit/test_crc64.c b/src/unit/test_crc64.c new file mode 100644 index 0000000000..9489a24625 --- /dev/null +++ b/src/unit/test_crc64.c @@ -0,0 +1,30 @@ +#include +#include "../crc64.h" + +#include "test_help.h" + +extern uint64_t _crc64(uint_fast64_t crc, const void *in_data, const uint64_t len); + +int test_crc64(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + crc64_init(); + + unsigned char numbers[] = "123456789"; + TEST_ASSERT_MESSAGE("[calcula]: CRC64 '123456789'", (uint64_t)_crc64(0, numbers, 9) == 16845390139448941002ull); + TEST_ASSERT_MESSAGE("[calcula]: CRC64 '123456789'", (uint64_t)crc64(0, numbers, 9) == 16845390139448941002ull); + + unsigned char li[] = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed " + "do eiusmod tempor incididunt ut labore et dolore magna " + "aliqua. Ut enim ad minim veniam, quis nostrud exercitation " + "ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis " + "aute irure dolor in reprehenderit in voluptate velit esse " + "cillum dolore eu fugiat nulla pariatur. 
Excepteur sint " + "occaecat cupidatat non proident, sunt in culpa qui officia " + "deserunt mollit anim id est laborum."; + + TEST_ASSERT_MESSAGE("[calcula]: CRC64 TEXT'", (uint64_t)_crc64(0, li, sizeof(li)) == 14373597793578550195ull); + TEST_ASSERT_MESSAGE("[calcula]: CRC64 TEXT", (uint64_t)crc64(0, li, sizeof(li)) == 14373597793578550195ull); + return 0; +} diff --git a/src/unit/test_crc64combine.c b/src/unit/test_crc64combine.c new file mode 100644 index 0000000000..ed5c39d97f --- /dev/null +++ b/src/unit/test_crc64combine.c @@ -0,0 +1,193 @@ +#include +#include +#include +#include +#include +#include + +#include "test_help.h" +#include "../zmalloc.h" +#include "../crc64.h" +#include "../crcspeed.h" +#include "../crccombine.h" + +static void genBenchmarkRandomData(char *data, int count); +static int bench_crc64(unsigned char *data, uint64_t size, long long passes, uint64_t check, char *name, int csv); +static void bench_combine(char *label, uint64_t size, uint64_t expect, int csv); + +long long _ustime(void) { + struct timeval tv; + long long ust; + + gettimeofday(&tv, NULL); + ust = ((long long)tv.tv_sec)*1000000; + ust += tv.tv_usec; + return ust; +} + +static int bench_crc64(unsigned char *data, uint64_t size, long long passes, uint64_t check, char *name, int csv) { + uint64_t min = size, hash; + long long original_start = _ustime(), original_end; + for (long long i=passes; i > 0; i--) { + hash = crc64(0, data, size); + } + original_end = _ustime(); + min = (original_end - original_start) * 1000 / passes; + /* approximate nanoseconds without nstime */ + if (csv) { + printf("%s,%" PRIu64 ",%" PRIu64 ",%d\n", + name, size, (1000 * size) / min, hash == check); + } else { + TEST_PRINT_INFO("test size=%" PRIu64 " algorithm=%s %" PRIu64 " M/sec matches=%d", + size, name, (1000 * size) / min, hash == check); + } + return hash != check; +} + +const uint64_t BENCH_RPOLY = UINT64_C(0x95ac9329ac4bc9b5); + +static void bench_combine(char *label, uint64_t size, uint64_t expect, int csv) { + uint64_t min = size, start = expect, thash = expect ^ (expect >> 17); + long long original_start = _ustime(), original_end; + for (int i=0; i < 1000; i++) { + crc64_combine(thash, start, size, BENCH_RPOLY, 64); + } + original_end = _ustime(); + /* ran 1000 times, want ns per, counted us per 1000 ... */ + min = original_end - original_start; + if (csv) { + printf("%s,%" PRIu64 ",%" PRIu64 "\n", label, size, min); + } else { + printf("%s size=%" PRIu64 " in %" PRIu64 " nsec\n", label, size, min); + } +} + +static void genBenchmarkRandomData(char *data, int count) { + static uint32_t state = 1234; + int i = 0; + + while (count--) { + state = (state*1103515245+12345); + data[i++] = '0'+((state>>16)&63); + } +} + +/* This is a special unit test useful for benchmarking crc64combine performance. The + * benchmarking is only done when the tests are invoked with a single test target, + * like 'valkey-unit-tests --single test_crc64combine.c --crc 16384'. 
*/ +int test_crc64combine(int argc, char **argv, int flags) { + if (!(flags & UNIT_TEST_SINGLE)) { + return 0; + } + + uint64_t crc64_test_size = 0; + int i, lastarg, csv = 0, loop = 0, combine = 0; +again: + for (i = 3; i < argc; i++) { + lastarg = (i == (argc-1)); + if (!strcmp(argv[i],"--help")) { + goto usage; + } else if (!strcmp(argv[i],"--csv")) { + csv = 1; + } else if (!strcmp(argv[i],"-l")) { + loop = 1; + } else if (!strcmp(argv[i],"--crc")) { + if (lastarg) goto invalid; + crc64_test_size = atoll(argv[++i]); + } else if (!strcmp(argv[i],"--combine")) { + combine = 1; + } else { +invalid: + printf("Invalid option \"%s\" or option argument missing\n\n",argv[i]); +usage: + printf( +"Usage: --single test_crc64combine.c [OPTIONS]\n\n" +" --csv Output in CSV format\n" +" -l Loop. Run the tests forever\n" +" --crc Benchmark crc64 faster options, using a buffer this big, and quit when done.\n" +" --combine Benchmark crc64 combine value ranges and timings.\n" + ); + return 1; + } + } + + int init_this_loop = 1; + long long init_start, init_end; + + do { + unsigned char* data = NULL; + uint64_t passes = 0; + if (crc64_test_size) { + data = zmalloc(crc64_test_size); + genBenchmarkRandomData((char*)data, crc64_test_size); + /* We want to hash about 1 gig of data in total, looped, to get a good + * idea of our performance. + */ + passes = (UINT64_C(0x100000000) / crc64_test_size); + passes = passes >= 2 ? passes : 2; + passes = passes <= 1000 ? passes : 1000; + } + + crc64_init(); + /* warm up the cache */ + set_crc64_cutoffs(crc64_test_size+1, crc64_test_size+1); + uint64_t expect = crc64(0, data, crc64_test_size); + + if (!combine && crc64_test_size) { + if (csv && init_this_loop) printf("algorithm,buffer,performance,crc64_matches\n"); + + /* get the single-character version for single-byte Redis behavior */ + set_crc64_cutoffs(0, crc64_test_size+1); + if (bench_crc64(data, crc64_test_size, passes, expect, "crc_1byte", csv)) return 1; + + set_crc64_cutoffs(crc64_test_size+1, crc64_test_size+1); + /* run with 8-byte "single" path, crcfaster */ + if (bench_crc64(data, crc64_test_size, passes, expect, "crcspeed", csv)) return 1; + + /* run with dual 8-byte paths */ + set_crc64_cutoffs(1, crc64_test_size+1); + if (bench_crc64(data, crc64_test_size, passes, expect, "crcdual", csv)) return 1; + + /* run with tri 8-byte paths */ + set_crc64_cutoffs(1, 1); + if (bench_crc64(data, crc64_test_size, passes, expect, "crctri", csv)) return 1; + + /* Be free memory region, be free. 
*/ + zfree(data); + data = NULL; + } + + uint64_t INIT_SIZE = UINT64_C(0xffffffffffffffff); + if (combine) { + if (init_this_loop) { + init_start = _ustime(); + crc64_combine( + UINT64_C(0xdeadbeefdeadbeef), + UINT64_C(0xfeebdaedfeebdaed), + INIT_SIZE, + BENCH_RPOLY, 64); + init_end = _ustime(); + + init_end -= init_start; + init_end *= 1000; + if (csv) { + printf("operation,size,nanoseconds\n"); + printf("init_64,%" PRIu64 ",%" PRIu64 "\n", INIT_SIZE, (uint64_t)init_end); + } else { + TEST_PRINT_INFO("init_64 size=%" PRIu64 " in %" PRIu64 " nsec", INIT_SIZE, (uint64_t)init_end); + } + /* use the hash itself as the size (unpredictable) */ + bench_combine("hash_as_size_combine", crc64_test_size, expect, csv); + + /* let's do something big (predictable, so fast) */ + bench_combine("largest_combine", INIT_SIZE, expect, csv); + } + bench_combine("combine", crc64_test_size, expect, csv); + } + init_this_loop = 0; + /* step down by ~1.641 for a range of test sizes */ + crc64_test_size -= (crc64_test_size >> 2) + (crc64_test_size >> 3) + (crc64_test_size >> 6); + } while (crc64_test_size > 3); + if (loop) goto again; + return 0; +} diff --git a/src/unit/test_files.h b/src/unit/test_files.h new file mode 100644 index 0000000000..ab2f2f6267 --- /dev/null +++ b/src/unit/test_files.h @@ -0,0 +1,31 @@ +/* Do not modify this file, it's automatically generated from utils/generate-unit-test-header.py */ +typedef int unitTestProc(int argc, char **argv, int flags); + +typedef struct unitTest { + char *name; + unitTestProc *proc; +} unitTest; + +int test_crc64(int argc, char **argv, int flags); +int test_crc64combine(int argc, char **argv, int flags); +int test_intsetValueEncodings(int argc, char **argv, int flags); +int test_intsetBasicAdding(int argc, char **argv, int flags); +int test_intsetLargeNumberRandomAdd(int argc, char **argv, int flags); +int test_intsetUpgradeFromint16Toint32(int argc, char **argv, int flags); +int test_intsetUpgradeFromint16Toint64(int argc, char **argv, int flags); +int test_intsetUpgradeFromint32Toint64(int argc, char **argv, int flags); +int test_intsetStressLookups(int argc, char **argv, int flags); +int test_intsetStressAddDelete(int argc, char **argv, int flags); + +unitTest __test_crc64_c[] = {{"test_crc64", test_crc64}, {NULL, NULL}}; +unitTest __test_crc64combine_c[] = {{"test_crc64combine", test_crc64combine}, {NULL, NULL}}; +unitTest __test_intset_c[] = {{"test_intsetValueEncodings", test_intsetValueEncodings}, {"test_intsetBasicAdding", test_intsetBasicAdding}, {"test_intsetLargeNumberRandomAdd", test_intsetLargeNumberRandomAdd}, {"test_intsetUpgradeFromint16Toint32", test_intsetUpgradeFromint16Toint32}, {"test_intsetUpgradeFromint16Toint64", test_intsetUpgradeFromint16Toint64}, {"test_intsetUpgradeFromint32Toint64", test_intsetUpgradeFromint32Toint64}, {"test_intsetStressLookups", test_intsetStressLookups}, {"test_intsetStressAddDelete", test_intsetStressAddDelete}, {NULL, NULL}}; + +struct unitTestSuite { + char *filename; + unitTest *tests; +} unitTestSuite[] = { + {"test_crc64.c", __test_crc64_c}, + {"test_crc64combine.c", __test_crc64combine_c}, + {"test_intset.c", __test_intset_c}, +}; diff --git a/src/unit/test_help.h b/src/unit/test_help.h new file mode 100644 index 0000000000..6fc5b4c378 --- /dev/null +++ b/src/unit/test_help.h @@ -0,0 +1,48 @@ +/* A very simple test framework for valkey. See unit/README.me for more information on usage. 
+ * + * Example: + * + * int test_example(int argc, char *argv[], int flags) { + * TEST_ASSERT_MESSAGE("Check if 1 == 1", 1==1); + * TEST_ASSERT(5 == 5); + * return 0; + * } + */ + +#ifndef __TESTHELP_H +#define __TESTHELP_H + +#include +#include + +/* The flags are the following: +* --accurate: Runs tests with more iterations. +* --large-memory: Enables tests that consume more than 100mb. +* --single: A flag to indicate a specific test file was executed. */ +#define UNIT_TEST_ACCURATE (1<<0) +#define UNIT_TEST_LARGE_MEMORY (1<<1) +#define UNIT_TEST_SINGLE (1<<2) + +#define KRED "\33[31m" +#define KGRN "\33[32m" +#define KBLUE "\33[34m" +#define KRESET "\33[0m" + +#define TEST_PRINT_ERROR(descr) \ + printf("[" KRED "%s - %s:%d" KRESET "] %s\n", __func__, __FILE__, __LINE__, descr) + +#define TEST_PRINT_INFO(descr, ...) \ + printf("[" KBLUE "%s - %s:%d" KRESET "] " descr "\n", __func__, __FILE__, __LINE__, __VA_ARGS__) + +#define TEST_ASSERT_MESSAGE(descr, _c) do { \ + if (!(_c)) { \ + TEST_PRINT_ERROR(descr); \ + return 1; \ + } \ +} while(0) + +#define TEST_ASSERT(_c) TEST_ASSERT_MESSAGE("Failed assertion: " #_c, _c) + +#define UNUSED(x) (void)(x) + +#endif diff --git a/src/unit/test_intset.c b/src/unit/test_intset.c new file mode 100644 index 0000000000..9e23101dff --- /dev/null +++ b/src/unit/test_intset.c @@ -0,0 +1,229 @@ +#include +#include + +#include "../intset.c" +#include "test_help.h" + +static long long usec(void) { + struct timeval tv; + gettimeofday(&tv,NULL); + return (((long long)tv.tv_sec)*1000000)+tv.tv_usec; +} + +static intset *createSet(int bits, int size) { + uint64_t mask = (1< 32) { + value = (rand()*rand()) & mask; + } else { + value = rand() & mask; + } + is = intsetAdd(is,value,NULL); + } + return is; +} + +static int checkConsistency(intset *is) { + for (uint32_t i = 0; i < (intrev32ifbe(is->length)-1); i++) { + uint32_t encoding = intrev32ifbe(is->encoding); + + if (encoding == INTSET_ENC_INT16) { + int16_t *i16 = (int16_t*)is->contents; + TEST_ASSERT(i16[i] < i16[i+1]); + } else if (encoding == INTSET_ENC_INT32) { + int32_t *i32 = (int32_t*)is->contents; + TEST_ASSERT(i32[i] < i32[i+1]); + } else { + int64_t *i64 = (int64_t*)is->contents; + TEST_ASSERT(i64[i] < i64[i+1]); + } + } + return 1; +} + +int test_intsetValueEncodings(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + TEST_ASSERT(_intsetValueEncoding(-32768) == INTSET_ENC_INT16); + TEST_ASSERT(_intsetValueEncoding(+32767) == INTSET_ENC_INT16); + TEST_ASSERT(_intsetValueEncoding(-32769) == INTSET_ENC_INT32); + TEST_ASSERT(_intsetValueEncoding(+32768) == INTSET_ENC_INT32); + TEST_ASSERT(_intsetValueEncoding(-2147483648) == INTSET_ENC_INT32); + TEST_ASSERT(_intsetValueEncoding(+2147483647) == INTSET_ENC_INT32); + TEST_ASSERT(_intsetValueEncoding(-2147483649) == INTSET_ENC_INT64); + TEST_ASSERT(_intsetValueEncoding(+2147483648) == INTSET_ENC_INT64); + TEST_ASSERT(_intsetValueEncoding(-9223372036854775808ull) == + INTSET_ENC_INT64); + TEST_ASSERT(_intsetValueEncoding(+9223372036854775807ull) == + INTSET_ENC_INT64); + + return 0; +} + +int test_intsetBasicAdding(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + intset *is = intsetNew(); + uint8_t success; + is = intsetAdd(is,5,&success); TEST_ASSERT(success); + is = intsetAdd(is,6,&success); TEST_ASSERT(success); + is = intsetAdd(is,4,&success); TEST_ASSERT(success); + is = intsetAdd(is,4,&success); TEST_ASSERT(!success); + TEST_ASSERT(6 == intsetMax(is)); + TEST_ASSERT(4 == 
intsetMin(is)); + zfree(is); + + return 0; +} + +int test_intsetLargeNumberRandomAdd(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + uint32_t inserts = 0; + uint8_t success; + intset *is = intsetNew(); + for (int i = 0; i < 1024; i++) { + is = intsetAdd(is,rand()%0x800,&success); + if (success) inserts++; + } + TEST_ASSERT(intrev32ifbe(is->length) == inserts); + TEST_ASSERT(checkConsistency(is) == 1); + zfree(is); + return 0; +} + +int test_intsetUpgradeFromint16Toint32(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + intset *is = intsetNew(); + is = intsetAdd(is,32,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); + is = intsetAdd(is,65535,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); + TEST_ASSERT(intsetFind(is,32)); + TEST_ASSERT(intsetFind(is,65535)); + TEST_ASSERT(checkConsistency(is) == 1); + zfree(is); + + is = intsetNew(); + is = intsetAdd(is,32,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); + is = intsetAdd(is,-65535,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); + TEST_ASSERT(intsetFind(is,32)); + TEST_ASSERT(intsetFind(is,-65535)); + TEST_ASSERT(checkConsistency(is) == 1); + zfree(is); + + return 0; +} + +int test_intsetUpgradeFromint16Toint64(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + intset *is = intsetNew(); + is = intsetNew(); + is = intsetAdd(is,32,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); + is = intsetAdd(is,4294967295,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); + TEST_ASSERT(intsetFind(is,32)); + TEST_ASSERT(intsetFind(is,4294967295)); + TEST_ASSERT(checkConsistency(is) == 1); + zfree(is); + + is = intsetNew(); + is = intsetAdd(is,32,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); + is = intsetAdd(is,-4294967295,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); + TEST_ASSERT(intsetFind(is,32)); + TEST_ASSERT(intsetFind(is,-4294967295)); + TEST_ASSERT(checkConsistency(is) == 1); + zfree(is); + + return 0; +} + +int test_intsetUpgradeFromint32Toint64(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + intset *is = intsetNew(); + is = intsetAdd(is,65535,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); + is = intsetAdd(is,4294967295,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); + TEST_ASSERT(intsetFind(is,65535)); + TEST_ASSERT(intsetFind(is,4294967295)); + TEST_ASSERT(checkConsistency(is) == 1); + zfree(is); + + is = intsetNew(); + is = intsetAdd(is,65535,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT32); + is = intsetAdd(is,-4294967295,NULL); + TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT64); + TEST_ASSERT(intsetFind(is,65535)); + TEST_ASSERT(intsetFind(is,-4294967295)); + TEST_ASSERT(checkConsistency(is) == 1); + zfree(is); + + return 0; +} + +int test_intsetStressLookups(int argc, char **argv, int flags) { + UNUSED(argc); + UNUSED(argv); + UNUSED(flags); + + long num = 100000, size = 10000; + int i, bits = 20; + long long start; + intset *is = createSet(bits,size); + TEST_ASSERT(checkConsistency(is) == 1); + + start = usec(); + for (i = 0; i < num; i++) intsetSearch(is,rand() % ((1< +#include +#include "test_files.h" +#include "test_help.h" + +/* We override the default assertion mechanism, so that it prints out info and then dies. 
*/ +void _serverAssert(const char *estr, const char *file, int line) { + printf("[" KRED "serverAssert - %s:%d" KRESET "] - %s\n", file, line, estr); + exit(1); +} + +/* Run the tests defined by the test suite. */ +int runTestSuite(struct unitTestSuite *test, int argc, char **argv, int flags) { + int test_num = 0; + int failed_tests = 0; + printf("[" KBLUE "START" KRESET "] - %s\n", test->filename); + + for (int id = 0; test->tests[id].proc != NULL; id++) { + test_num++; + int test_result = (test->tests[id].proc(argc, argv, flags) != 0); + if (!test_result) { + printf("[" KGRN "ok" KRESET "] - %s:%s\n", test->filename, test->tests[id].name); + } else { + printf("[" KRED "fail" KRESET "] - %s:%s\n", test->filename, test->tests[id].name); + failed_tests++; + } + } + + printf("[" KBLUE "END" KRESET "] - %s: ", test->filename); + printf("%d tests, %d passed, %d failed\n", test_num, + test_num - failed_tests, failed_tests); + return !failed_tests; +} + +int main(int argc, char **argv) { + int flags = 0; + char *file = NULL; + for (int j = 1; j < argc; j++) { + char *arg = argv[j]; + if (!strcasecmp(arg, "--accurate")) flags |= UNIT_TEST_ACCURATE; + else if (!strcasecmp(arg, "--large-memory")) flags |= UNIT_TEST_LARGE_MEMORY; + else if (!strcasecmp(arg, "--single") && (j + 1 < argc)) { + flags |= UNIT_TEST_SINGLE; + file = argv[j + 1]; + } + } + + int numtests = sizeof(unitTestSuite)/sizeof(struct unitTest); + int failed_num = 0, suites_executed = 0; + for (int j = 0; j < numtests; j++) { + if (file && strcasecmp(file, unitTestSuite[j].filename)) continue; + if (!runTestSuite(&unitTestSuite[j], argc, argv, flags)) { + failed_num++; + } + suites_executed++; + } + printf("%d test suites executed, %d passed, %d failed\n", suites_executed, + suites_executed-failed_num, failed_num); + + return failed_num == 0 ? 0 : 1; +} diff --git a/utils/generate-unit-test-header.py b/utils/generate-unit-test-header.py new file mode 100755 index 0000000000..00cd852f2d --- /dev/null +++ b/utils/generate-unit-test-header.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 +import os +import re + +UNIT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../src/unit') +TEST_FILE = UNIT_DIR + '/test_files.h' +TEST_PROTOTYPE = '(int (test_[a-zA-Z0-9_]*)\(.*\)).*{' + +if __name__ == '__main__': + with open(TEST_FILE, 'w') as output: + # Find each test file and collect the test names. 
+ test_suites = [] + for root, dirs, files in os.walk(UNIT_DIR): + for file in files: + file_path = UNIT_DIR + '/' + file + if not file.endswith('.c') or file == 'test_main.c': + continue + tests = [] + with open(file_path, 'r') as f: + for line in f: + match = re.match(TEST_PROTOTYPE, line) + if match: + function = match.group(1) + test_name = match.group(2) + tests.append((test_name, function)) + test_suites.append({'file': file, 'tests': tests}) + test_suites.sort(key=lambda test_suite: test_suite['file']) + output.write("""/* Do not modify this file, it's automatically generated from utils/generate-unit-test-header.py */ +typedef int unitTestProc(int argc, char **argv, int flags); + +typedef struct unitTest { + char *name; + unitTestProc *proc; +} unitTest; + +""") + + # Write the headers for the functions + for test_suite in test_suites: + for test in test_suite['tests']: + output.write('{};\n'.format(test[1])) + output.write("\n") + + # Create test suite lists + for test_suite in test_suites: + output.write('unitTest __{}[] = {{'.format(test_suite['file'].replace('.c', '_c'))) + for test in test_suite['tests']: + output.write('{{"{}", {}}}, '.format(test[0], test[0])) + output.write('{NULL, NULL}};\n') + + output.write(""" +struct unitTestSuite { + char *filename; + unitTest *tests; +} unitTestSuite[] = { +""") + for test_suite in test_suites: + output.write(' {{"{0}", __{1}}},\n'.format(test_suite['file'], test_suite['file'].replace('.c', '_c'))) + output.write('};\n') \ No newline at end of file From 8f974484edea7d001573c015d0387b15801a0eca Mon Sep 17 00:00:00 2001 From: Ted Lyngmo Date: Fri, 3 May 2024 20:10:33 +0200 Subject: [PATCH 02/10] Log the real reason for why posix_fadvise failed (#430) `reclaimFilePageCache` did not set `errno` but `rdbSaveInternal` which is logging the error assumed it did. This makes sure `errno` is set. Signed-off-by: Ted Lyngmo --- src/util.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/util.c b/src/util.c index 6870fc8ed8..b493d26dab 100644 --- a/src/util.c +++ b/src/util.c @@ -1176,7 +1176,10 @@ int fsyncFileDir(const char *filename) { int reclaimFilePageCache(int fd, size_t offset, size_t length) { #ifdef HAVE_FADVISE int ret = posix_fadvise(fd, offset, length, POSIX_FADV_DONTNEED); - if (ret) return -1; + if (ret) { + errno = ret; + return -1; + } return 0; #else UNUSED(fd); From 3b256a5af07a5e34f359e910fa66cbcfaf5a3c79 Mon Sep 17 00:00:00 2001 From: pshankinclarke <38367844+pshankinclarke@users.noreply.github.com> Date: Fri, 3 May 2024 11:13:10 -0700 Subject: [PATCH 03/10] Improve TLS.md configuration instructions (#385) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Parker Shankin-Clarke Co-authored-by: Viktor Söderqvist --- TLS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TLS.md b/TLS.md index 09c5866abd..4ac61f406b 100644 --- a/TLS.md +++ b/TLS.md @@ -58,7 +58,7 @@ To connect to this Valkey server with `valkey-cli`: --key ./tests/tls/valkey.key \ --cacert ./tests/tls/ca.crt -This will disable TCP and enable TLS on port 6379. It's also possible to have +Specifying `port 0` will disable TCP. It's also possible to have both TCP and TLS available, but you'll need to assign different ports. 
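A minimal sketch of such a dual-listener setup (not part of this patch; the port numbers are placeholders and the certificate paths follow the test certificates used in the examples above):

    port 6379
    tls-port 6380
    tls-cert-file ./tests/tls/valkey.crt
    tls-key-file ./tests/tls/valkey.key
    tls-ca-cert-file ./tests/tls/ca.crt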
To make a Replica connect to the master using TLS, use `--tls-replication yes`, From 43692aca7ab58d2d020b3c0b67242f238295c9d2 Mon Sep 17 00:00:00 2001 From: Mike Dolan Date: Fri, 3 May 2024 15:12:13 -0400 Subject: [PATCH 04/10] Update README.md to add Valkey entity information (#432) I added details at the end with the Valkey entity information under an "About Valkey" heading. Please feel free to adjust the heading if that's not an appropriate one to use. --------- Signed-off-by: Mike Dolan Signed-off-by: Madelyn Olson Co-authored-by: Madelyn Olson --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index b958a90b61..3c32be3941 100644 --- a/README.md +++ b/README.md @@ -215,3 +215,9 @@ Please see the [CONTRIBUTING.md][2]. For security bugs and vulnerabilities, plea [1]: https://github.com/valkey-io/valkey/blob/unstable/COPYING [2]: https://github.com/valkey-io/valkey/blob/unstable/CONTRIBUTING.md [3]: https://github.com/valkey-io/valkey/blob/unstable/SECURITY.md + +Valkey is an open community project under LF Projects +----------------- +Valkey a Series of LF Projects, LLC +2810 N Church St, PMB 57274 +Wilmington, Delaware 19802-4447 From 472c1ca26bfa770a506390a28d4e6c6739ec3916 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Viktor=20S=C3=B6derqvist?= Date: Sat, 4 May 2024 00:14:56 +0200 Subject: [PATCH 05/10] Update links in module API docs (generated from module.c) (#433) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are used in the docs and on the website. Signed-off-by: Viktor Söderqvist --- src/module.c | 38 ++++++++++++++++---------------- utils/generate-module-api-doc.rb | 2 +- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/module.c b/src/module.c index 961525b58e..a1b1b46cc5 100644 --- a/src/module.c +++ b/src/module.c @@ -1001,7 +1001,7 @@ int moduleGetCommandChannelsViaAPI(struct serverCommand *cmd, robj **argv, int a * * These functions are used to implement custom commands. * - * For examples, see https://redis.io/topics/modules-intro. + * For examples, see https://valkey.io/topics/modules-intro. * -------------------------------------------------------------------------- */ /* Return non-zero if a module command, that was declared with the @@ -1215,7 +1215,7 @@ ValkeyModuleCommand *moduleCreateCommandProxy(struct ValkeyModule *module, sds d * from the same input arguments and key values. * Starting from Redis OSS 7.0 this flag has been deprecated. * Declaring a command as "random" can be done using - * command tips, see https://redis.io/topics/command-tips. + * command tips, see https://valkey.io/topics/command-tips. * * **"allow-stale"**: The command is allowed to run on slaves that don't * serve stale data. Don't use if you don't know what * this means. @@ -1605,7 +1605,7 @@ int VM_SetCommandACLCategories(ValkeyModuleCommand *command, const char *aclflag * both strings set to NULL. * * - `tips`: A string of space-separated tips regarding this command, meant for - * clients and proxies. See https://redis.io/topics/command-tips. + * clients and proxies. See https://valkey.io/topics/command-tips. * * - `arity`: Number of arguments, including the command name itself. A positive * number specifies an exact number of arguments and a negative number @@ -3205,7 +3205,7 @@ int VM_ReplyWithArray(ValkeyModuleCtx *ctx, long len) { } /* Reply with a RESP3 Map type of 'len' pairs. - * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. 
+ * Visit https://valkey.io/topics/protocol for more info about RESP3. * * After starting a map reply, the module must make `len*2` calls to other * `ReplyWith*` style functions in order to emit the elements of the map. @@ -3222,7 +3222,7 @@ int VM_ReplyWithMap(ValkeyModuleCtx *ctx, long len) { } /* Reply with a RESP3 Set type of 'len' elements. - * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. + * Visit https://valkey.io/topics/protocol for more info about RESP3. * * After starting a set reply, the module must make `len` calls to other * `ReplyWith*` style functions in order to emit the elements of the set. @@ -3240,7 +3240,7 @@ int VM_ReplyWithSet(ValkeyModuleCtx *ctx, long len) { /* Add attributes (metadata) to the reply. Should be done before adding the - * actual reply. see https://github.com/antirez/RESP3/blob/master/spec.md#attribute-type + * actual reply. see https://valkey.io/topics/protocol#attribute-type * * After starting an attribute's reply, the module must make `len*2` calls to other * `ReplyWith*` style functions in order to emit the elements of the attribute map. @@ -3348,19 +3348,19 @@ void VM_ReplySetArrayLength(ValkeyModuleCtx *ctx, long len) { /* Very similar to ValkeyModule_ReplySetArrayLength except `len` should * exactly half of the number of `ReplyWith*` functions called in the * context of the map. - * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. */ + * Visit https://valkey.io/topics/protocol for more info about RESP3. */ void VM_ReplySetMapLength(ValkeyModuleCtx *ctx, long len) { moduleReplySetCollectionLength(ctx, len, COLLECTION_REPLY_MAP); } /* Very similar to ValkeyModule_ReplySetArrayLength - * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. */ + * Visit https://valkey.io/topics/protocol for more info about RESP3. */ void VM_ReplySetSetLength(ValkeyModuleCtx *ctx, long len) { moduleReplySetCollectionLength(ctx, len, COLLECTION_REPLY_SET); } /* Very similar to ValkeyModule_ReplySetMapLength - * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. + * Visit https://valkey.io/topics/protocol for more info about RESP3. * * Must not be called if VM_ReplyWithAttribute returned an error. */ void VM_ReplySetAttributeLength(ValkeyModuleCtx *ctx, long len) { @@ -3439,7 +3439,7 @@ int VM_ReplyWithNull(ValkeyModuleCtx *ctx) { } /* Reply with a RESP3 Boolean type. - * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. + * Visit https://valkey.io/topics/protocol for more info about RESP3. * * In RESP3, this is boolean type * In RESP2, it's a string response of "1" and "0" for true and false respectively. @@ -3489,7 +3489,7 @@ int VM_ReplyWithCallReply(ValkeyModuleCtx *ctx, ValkeyModuleCallReply *reply) { } /* Reply with a RESP3 Double type. - * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. + * Visit https://valkey.io/topics/protocol for more info about RESP3. * * Send a string reply obtained converting the double 'd' into a bulk string. * This function is basically equivalent to converting a double into @@ -3508,7 +3508,7 @@ int VM_ReplyWithDouble(ValkeyModuleCtx *ctx, double d) { } /* Reply with a RESP3 BigNumber type. - * Visit https://github.com/antirez/RESP3/blob/master/spec.md for more info about RESP3. + * Visit https://valkey.io/topics/protocol for more info about RESP3. 
* * In RESP3, this is a string of length `len` that is tagged as a BigNumber, * however, it's up to the caller to ensure that it's a valid BigNumber. @@ -5426,7 +5426,7 @@ int VM_HashGet(ValkeyModuleKey *key, int flags, ...) { /* -------------------------------------------------------------------------- * ## Key API for Stream type * - * For an introduction to streams, see https://redis.io/topics/streams-intro. + * For an introduction to streams, see https://valkey.io/topics/streams-intro. * * The type ValkeyModuleStreamID, which is used in stream functions, is a struct * with two 64-bit fields and is defined as @@ -6299,7 +6299,7 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int * // Do something with myval. * } * - * This API is documented here: https://redis.io/topics/modules-intro + * This API is documented here: https://valkey.io/topics/modules-intro */ ValkeyModuleCallReply *VM_Call(ValkeyModuleCtx *ctx, const char *cmdname, const char *fmt, ...) { client *c = NULL; @@ -6808,7 +6808,7 @@ robj *moduleTypeDupOrReply(client *c, robj *fromkey, robj *tokey, int todb, robj /* Register a new data type exported by the module. The parameters are the * following. Please for in depth documentation check the modules API - * documentation, especially https://redis.io/topics/modules-native-types. + * documentation, especially https://valkey.io/topics/modules-native-types. * * * **name**: A 9 characters data type name that MUST be unique in the * Modules ecosystem. Be creative... and there will be no collisions. Use @@ -7697,7 +7697,7 @@ void VM_LatencyAddSample(const char *event, mstime_t latency) { * ## Blocking clients from modules * * For a guide about blocking commands in modules, see - * https://redis.io/topics/modules-blocking-ops. + * https://valkey.io/topics/modules-blocking-ops. * -------------------------------------------------------------------------- */ /* Returns 1 if the client already in the moduleUnblocked list, 0 otherwise. */ @@ -8717,7 +8717,7 @@ void moduleReleaseGIL(void) { * runs is dangerous and discouraged. In order to react to key space events with * write actions, please refer to `VM_AddPostNotificationJob`. * - * See https://redis.io/topics/notifications for more information. + * See https://valkey.io/topics/notifications for more information. */ int VM_SubscribeToKeyspaceEvents(ValkeyModuleCtx *ctx, int types, ValkeyModuleNotificationFunc callback) { ValkeyModuleKeyspaceSubscriber *sub = zmalloc(sizeof(*sub)); @@ -9823,7 +9823,7 @@ int moduleGetACLLogEntryReason(ValkeyModuleACLLogEntryReason reason) { /* Adds a new entry in the ACL log. * Returns VALKEYMODULE_OK on success and VALKEYMODULE_ERR on error. * - * For more information about ACL log, please refer to https://redis.io/commands/acl-log */ + * For more information about ACL log, please refer to https://valkey.io/commands/acl-log */ int VM_ACLAddLogEntry(ValkeyModuleCtx *ctx, ValkeyModuleUser *user, ValkeyModuleString *object, ValkeyModuleACLLogEntryReason reason) { int acl_reason = moduleGetACLLogEntryReason(reason); if (!acl_reason) return VALKEYMODULE_ERR; @@ -9834,7 +9834,7 @@ int VM_ACLAddLogEntry(ValkeyModuleCtx *ctx, ValkeyModuleUser *user, ValkeyModule /* Adds a new entry in the ACL log with the `username` ValkeyModuleString provided. * Returns VALKEYMODULE_OK on success and VALKEYMODULE_ERR on error. 
* - * For more information about ACL log, please refer to https://redis.io/commands/acl-log */ + * For more information about ACL log, please refer to https://valkey.io/commands/acl-log */ int VM_ACLAddLogEntryByUserName(ValkeyModuleCtx *ctx, ValkeyModuleString *username, ValkeyModuleString *object, ValkeyModuleACLLogEntryReason reason) { int acl_reason = moduleGetACLLogEntryReason(reason); if (!acl_reason) return VALKEYMODULE_ERR;
diff --git a/utils/generate-module-api-doc.rb b/utils/generate-module-api-doc.rb index 1e3f76ecc4..8ea6d38395 100755 --- a/utils/generate-module-api-doc.rb +++ b/utils/generate-module-api-doc.rb @@ -160,7 +160,7 @@ def is_func_line(src, i) end # Populate the 'since' map (name => version) if we're in a git repo. -require File.dirname(__FILE__) + '/module-api-since.rb' +require "./" + File.dirname(__FILE__) + '/module-api-since.rb' git_dir = File.dirname(__FILE__) + "/../.git" if File.directory?(git_dir) && `which git` != "" `git --git-dir="#{git_dir}" tag --sort=v:refname`.each_line do |version|
From 39d4b43d4beba0b656929a09d48ade662f52edf9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Svensson?= Date: Sat, 4 May 2024 01:54:14 +0200 Subject: [PATCH 06/10] Pin versions of Github Actions in CI (#221) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

Pin the Github Action dependencies to the hash according to secure software development best practices recommended by the Open Source Security Foundation (OpenSSF).

When developing a CI workflow, it's common to version-pin dependencies (e.g. actions/checkout@v4). However, version tags are mutable, so a malicious attacker could overwrite a version tag to point to a malicious or vulnerable commit instead. Pinning workflow dependencies by hash ensures the dependency is immutable and its behavior is guaranteed. See https://github.com/ossf/scorecard/blob/main/docs/checks.md#pinned-dependencies

`dependabot` supports updating a hash and the version comment, so its updates will continue to work as before.

Links to used actions and their tag/hash for review/validation: https://github.com/actions/checkout/tags (v4.1.2 was rolled back) https://github.com/github/codeql-action/tags https://github.com/maxim-lobanov/setup-xcode/tags https://github.com/cross-platform-actions/action/releases/tag/v0.22.0 https://github.com/py-actions/py-dependency-install/tags https://github.com/actions/upload-artifact/tags https://github.com/actions/setup-node/tags https://github.com/taiki-e/install-action/releases/tag/v2.32.2

This PR is part of #211.
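For illustration only (this snippet is not part of the original patch), the pinning convention applied throughout the diffs below looks like this, reusing the actions/checkout hash that appears in them:

```yaml
# Before: a mutable tag; the commit it resolves to can be re-pointed later.
- uses: actions/checkout@v4

# After: an immutable commit hash, with the tag kept as a trailing comment
# so dependabot can still recognize and bump the pinned version.
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
```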
Signed-off-by: Björn Svensson --- .github/workflows/ci.yml | 14 ++--- .github/workflows/codeql-analysis.yml | 8 +-- .github/workflows/coverity.yml | 2 +- .github/workflows/daily.yml | 64 ++++++++++++---------- .github/workflows/external.yml | 12 ++-- .github/workflows/reply-schemas-linter.yml | 4 +- .github/workflows/spell-check.yml | 4 +- 7 files changed, 57 insertions(+), 51 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c94893320b..91900f2d35 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ jobs: test-ubuntu-latest: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make # Fail build if there are warnings # build with TLS just for compilation coverage @@ -34,7 +34,7 @@ jobs: test-sanitizer-address: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make # build with TLS module just for compilation coverage run: make SANITIZER=address SERVER_CFLAGS='-Werror -DDEBUG_ASSERTIONS' BUILD_TLS=module @@ -52,7 +52,7 @@ jobs: runs-on: ubuntu-latest container: debian:buster steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make run: | apt-get update && apt-get install -y build-essential @@ -61,14 +61,14 @@ jobs: build-macos-latest: runs-on: macos-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make run: make SERVER_CFLAGS='-Werror' build-32bit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make run: | sudo apt-get update && sudo apt-get install libc6-dev-i386 @@ -77,7 +77,7 @@ jobs: build-libc-malloc: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: make run: make SERVER_CFLAGS='-Werror' MALLOC=libc @@ -87,7 +87,7 @@ jobs: steps: # on centos7, actions/checkout@v4 does not work, so we use v3 # ref. 
https://github.com/actions/checkout/issues/1487 - - uses: actions/checkout@v3 + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 - name: make run: | diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 5055e6b8dc..d4db9bcc29 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,15 +24,15 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@v3 + uses: github/codeql-action/autobuild@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index d63ba6ab53..47eb27276e 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -13,7 +13,7 @@ jobs: if: github.repository == 'valkey-io/valkey' runs-on: ubuntu-latest steps: - - uses: actions/checkout@main + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Download and extract the Coverity Build Tool run: | wget -q https://scan.coverity.com/download/cxx/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=valkey-io%2Fvalkey" -O cov-analysis-linux64.tar.gz diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index f4726a420e..bbe4b34b58 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -49,7 +49,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -93,7 +93,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -139,7 +139,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -176,7 +176,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -213,7 +213,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: 
${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -260,7 +260,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -304,7 +304,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -348,7 +348,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -380,7 +380,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -458,7 +458,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -488,7 +488,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -523,7 +523,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -553,7 +553,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -593,7 +593,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: 
actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -645,7 +645,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -691,7 +691,9 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v3 + # On centos7 actions/checkout@v4 does not work, so we use v3 + # ref. https://github.com/actions/checkout/issues/1487 + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -731,7 +733,9 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v3 + # On centos7 actions/checkout@v4 does not work, so we use v3 + # ref. https://github.com/actions/checkout/issues/1487 + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -778,7 +782,9 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v3 + # On centos7 actions/checkout@v4 does not work, so we use v3 + # ref. 
https://github.com/actions/checkout/issues/1487 + - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -824,7 +830,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -853,7 +859,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -879,7 +885,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -899,7 +905,7 @@ jobs: !contains(github.event.inputs.skipjobs, 'macos') timeout-minutes: 14400 steps: - - uses: maxim-lobanov/setup-xcode@v1 + - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 with: xcode-version: latest - name: prep @@ -911,7 +917,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -930,12 +936,12 @@ jobs: run: | echo "GITHUB_REPOSITORY=${{github.event.inputs.use_repo}}" >> $GITHUB_ENV echo "GITHUB_HEAD_REF=${{github.event.inputs.use_git_ref}}" >> $GITHUB_ENV - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} - name: test - uses: cross-platform-actions/action@v0.22.0 + uses: cross-platform-actions/action@5800fa0060a22edf69992a779adac3d2bb3a6f8a # v0.22.0 with: operating_system: freebsd environment_variables: MAKE @@ -962,7 +968,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -1001,7 +1007,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -1040,7 +1046,7 @@ jobs: echo "skiptests: ${{github.event.inputs.skiptests}}" echo "test_args: ${{github.event.inputs.test_args}}" echo "cluster_test_args: 
${{github.event.inputs.cluster_test_args}}" - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} @@ -1061,7 +1067,7 @@ jobs: if: true && !contains(github.event.inputs.skiptests, 'cluster') run: ./runtest-cluster --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}} - name: Install Python dependencies - uses: py-actions/py-dependency-install@v4 + uses: py-actions/py-dependency-install@30aa0023464ed4b5b116bd9fbdab87acf01a484e # v4.1.0 with: path: "./utils/req-res-validator/requirements.txt" - name: validator diff --git a/.github/workflows/external.yml b/.github/workflows/external.yml index 8111c5e6cd..8946526c8d 100644 --- a/.github/workflows/external.yml +++ b/.github/workflows/external.yml @@ -15,7 +15,7 @@ jobs: if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey' timeout-minutes: 14400 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Build run: make SERVER_CFLAGS=-Werror - name: Start valkey-server @@ -30,7 +30,7 @@ jobs: --tags -slow - name: Archive server log if: ${{ failure() }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: test-external-standalone-log path: external-server.log @@ -40,7 +40,7 @@ jobs: if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey' timeout-minutes: 14400 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Build run: make SERVER_CFLAGS=-Werror - name: Start valkey-server @@ -58,7 +58,7 @@ jobs: --tags -slow - name: Archive server log if: ${{ failure() }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: test-external-cluster-log path: external-server.log @@ -68,7 +68,7 @@ jobs: if: github.event_name != 'schedule' || github.repository == 'valkey-io/valkey' timeout-minutes: 14400 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Build run: make SERVER_CFLAGS=-Werror - name: Start valkey-server @@ -82,7 +82,7 @@ jobs: --tags "-slow -needs:debug" - name: Archive server log if: ${{ failure() }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: name: test-external-nodebug-log path: external-server.log diff --git a/.github/workflows/reply-schemas-linter.yml b/.github/workflows/reply-schemas-linter.yml index eb14a27204..0c4e62341e 100644 --- a/.github/workflows/reply-schemas-linter.yml +++ b/.github/workflows/reply-schemas-linter.yml @@ -15,9 +15,9 @@ jobs: reply-schemas-linter: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup nodejs - uses: actions/setup-node@v4 + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 - name: Install packages run: npm install ajv - name: linter diff --git a/.github/workflows/spell-check.yml b/.github/workflows/spell-check.yml index b4bc62e7b0..a5d3fd68d4 100644 --- a/.github/workflows/spell-check.yml +++ b/.github/workflows/spell-check.yml @@ -19,10 +19,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Install typos - uses: taiki-e/install-action@v2.32.2 + uses: taiki-e/install-action@cd5df4de2e75f3b819ba55f780f7bb8cd4a05a41 # v2.32.2 with: tool: typos
From 9ebbd5f03856ba8b6472aaf88712459a2e4d00b5 Mon Sep 17 00:00:00 2001 From: Sergey Fedorov Date: Sun, 5 May 2024 03:38:06 +0800 Subject: [PATCH 07/10] Fix issues for older versions of Darwin and improve PowerPC support (#436)

Existing code does not build on macOS < 10.7. There are two issues:

1. The check for `MAC_OS_10_6_DETECTED` does the opposite of what it should, since `AvailabilityMacros.h` does not define underscore-prefixed versions of macros. `__MAC_OS_X_VERSION_MAX_ALLOWED` evaluates to 0, and on 10.6 everything breaks down. Credits to @mohd-akram, who pointed out a possible origin of this problem.

2. Once that is fixed, on 10.6 when building for `ppc` there are new errors, because the code uses inaccurate assumptions about the architecture. Fix that too.

--------- Signed-off-by: Sergey Fedorov --- src/config.h | 11 ++++++----- src/debug.c | 8 +++++++- 2 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/src/config.h b/src/config.h index 2468110f8d..4646f653ea 100644 --- a/src/config.h +++ b/src/config.h @@ -42,7 +42,7 @@ #include #endif -#if defined(__APPLE__) && defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 1060 +#if defined(__APPLE__) && defined(MAC_OS_X_VERSION_MAX_ALLOWED) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1060 #define MAC_OS_10_6_DETECTED #endif @@ -217,12 +217,13 @@ void setproctitle(const char *fmt, ...); #if defined(sel) || defined(pyr) || defined(mc68000) || defined(sparc) || \ defined(is68k) || defined(tahoe) || defined(ibm032) || defined(ibm370) || \ - defined(MIPSEB) || defined(_MIPSEB) || defined(_IBMR2) || defined(DGUX) ||\ + defined(MIPSEB) || defined(_MIPSEB) || defined(_IBMR2) || defined(DGUX) || \ defined(apollo) || defined(__convex__) || defined(_CRAY) || \ defined(__hppa) || defined(__hp9000) || \ defined(__hp9000s300) || defined(__hp9000s700) || \ - defined (BIT_ZERO_ON_LEFT) || defined(m68k) || defined(__sparc) -#define BYTE_ORDER BIG_ENDIAN + defined (BIT_ZERO_ON_LEFT) || defined(m68k) || defined(__sparc) || \ + (defined(__APPLE__) && defined(__POWERPC__)) +#define BYTE_ORDER BIG_ENDIAN #endif #endif /* linux */ #endif /* BSD */ @@ -304,7 +305,7 @@ void setproctitle(const char *fmt, ...); #include #define valkey_set_thread_title(name) rename_thread(find_thread(0), name) #else -#if (defined __APPLE__ && defined(__MAC_OS_X_VERSION_MAX_ALLOWED) && __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070) +#if (defined __APPLE__ && defined(MAC_OS_X_VERSION_MAX_ALLOWED) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070) int pthread_setname_np(const char *name); #include #define valkey_set_thread_title(name) pthread_setname_np(name)
diff --git a/src/debug.c b/src/debug.c index 33d145b28d..5fa9a70d5e 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1192,6 +1192,7 @@ static void* getAndSetMcontextEip(ucontext_t *uc, void *eip) { #elif defined(__i386__) GET_SET_RETURN(uc->uc_mcontext->__ss.__eip, eip); #else + /* OSX PowerPC */ GET_SET_RETURN(uc->uc_mcontext->__ss.__srr0, eip); #endif #elif defined(__APPLE__) && defined(MAC_OS_10_6_DETECTED) @@ -1200,6 +1201,8 @@ static void* getAndSetMcontextEip(ucontext_t *uc, void *eip) { GET_SET_RETURN(uc->uc_mcontext->__ss.__rip, eip); #elif defined(__i386__) GET_SET_RETURN(uc->uc_mcontext->__ss.__eip, eip); + #elif defined(__ppc__) + GET_SET_RETURN(uc->uc_mcontext->__ss.__srr0, eip); #else /* OSX
ARM64 */ void *old_val = (void*)arm_thread_state64_get_pc(uc->uc_mcontext->__ss); @@ -1344,7 +1347,7 @@ void logRegisters(ucontext_t *uc) { (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__esp); - #else + #elif defined(__arm64__) /* OSX ARM64 */ serverLog(LL_WARNING, "\n" @@ -1393,6 +1396,9 @@ void logRegisters(ucontext_t *uc) { (unsigned long) uc->uc_mcontext->__ss.__cpsr ); logStackContent((void**) arm_thread_state64_get_sp(uc->uc_mcontext->__ss)); + #else + /* At the moment we do not implement this for PowerPC */ + NOT_SUPPORTED(); #endif /* Linux */ #elif defined(__linux__)
From cc703aa3bcb866dfda6ca9bf6ca527ce6cc44ad9 Mon Sep 17 00:00:00 2001 From: Chen Tianjie Date: Mon, 6 May 2024 12:52:59 +0800 Subject: [PATCH 08/10] Input output traffic stats and command process count for each client. (#327)

We already have global stats for input traffic, output traffic and how many commands have been executed. However, some users have difficulty locating the IP(s) that generate heavy network traffic, so per-client stats are introduced here.

``` tot-net-in // Total network input bytes read from the client tot-net-out // Total network output bytes sent to the client tot-cmds // Total commands the client has executed ```

These three stats are shown in `CLIENT LIST` and `CLIENT INFO`. Though the metrics are updated in hot code paths, personally I don't think they will slow down the server; considering all the other complex operations handled nearby, this is only a small and simple operation. However, we do need to be cautious when adding more and more metrics: as discussed in redis/redis#12640, we may need to find a way to tell whether this causes an obvious performance degradation.

--------- Signed-off-by: Chen Tianjie --- src/blocked.c | 1 + src/networking.c | 10 ++++- src/server.c | 8 +++- src/server.h | 3 ++ tests/unit/introspection.tcl | 73 +++++++++++++++++++++++++++++++++++- 5 files changed, 91 insertions(+), 4 deletions(-)
diff --git a/src/blocked.c b/src/blocked.c index ad815a9b6c..ec1d377020 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -107,6 +107,7 @@ void updateStatsOnUnblock(client *c, long blocked_us, long reply_us, int had_err const ustime_t total_cmd_duration = c->duration + blocked_us + reply_us; c->lastcmd->microseconds += total_cmd_duration; c->lastcmd->calls++; + c->commands_processed++; server.stat_numcommands++; if (had_errors) c->lastcmd->failed_calls++;
diff --git a/src/networking.c b/src/networking.c index f471d359b1..d5df47234a 100644 --- a/src/networking.c +++ b/src/networking.c @@ -214,6 +214,9 @@ client *createClient(connection *conn) { c->mem_usage_bucket_node = NULL; if (conn) linkClient(c); initClientMultiState(c); + c->net_input_bytes = 0; + c->net_output_bytes = 0; + c->commands_processed = 0; return c; } @@ -1991,6 +1994,7 @@ int writeToClient(client *c, int handler_installed) { } else { atomicIncr(server.stat_net_output_bytes, totwritten); } + c->net_output_bytes += totwritten; if (nwritten == -1) { if (connGetState(c->conn) != CONN_STATE_CONNECTED) { @@ -2718,6 +2722,7 @@ void readQueryFromClient(connection *conn) { } else { atomicIncr(server.stat_net_input_bytes, nread); } + c->net_input_bytes += nread; if (!(c->flags & CLIENT_MASTER) && /* The commands cached in the MULTI/EXEC queue have not been executed yet, @@ -2874,7 +2879,10 @@ sds catClientInfoString(sds s, client *client) { " redir=%I", (client->flags & CLIENT_TRACKING) ?
(long long) client->client_tracking_redirection : -1, " resp=%i", client->resp, " lib-name=%s", client->lib_name ? (char*)client->lib_name->ptr : "", - " lib-ver=%s", client->lib_ver ? (char*)client->lib_ver->ptr : "")); + " lib-ver=%s", client->lib_ver ? (char*)client->lib_ver->ptr : "", + " tot-net-in=%U", client->net_input_bytes, + " tot-net-out=%U", client->net_output_bytes, + " tot-cmds=%U", client->commands_processed)); return ret; } diff --git a/src/server.c b/src/server.c index 0d8611f39e..45eeea597a 100644 --- a/src/server.c +++ b/src/server.c @@ -3734,8 +3734,14 @@ void call(client *c, int flags) { } } - if (!(c->flags & CLIENT_BLOCKED)) + if (!(c->flags & CLIENT_BLOCKED)) { + /* Modules may call commands in cron, in which case server.current_client + * is not set. */ + if (server.current_client) { + server.current_client->commands_processed++; + } server.stat_numcommands++; + } /* Record peak memory after each command and before the eviction that runs * before the next command. */ diff --git a/src/server.h b/src/server.h index 50ce567477..d71d6276d9 100644 --- a/src/server.h +++ b/src/server.h @@ -1282,6 +1282,9 @@ typedef struct client { #ifdef LOG_REQ_RES clientReqResInfo reqres; #endif + unsigned long long net_input_bytes; /* Total network input bytes read from this client. */ + unsigned long long net_output_bytes; /* Total network output bytes sent to this client. */ + unsigned long long commands_processed; /* Total count of commands this client executed. */ } client; /* ACL information */ diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 582a019d2d..92bacaa071 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -7,7 +7,7 @@ start_server {tags {"introspection"}} { test {CLIENT LIST} { r client list - } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 watch=0 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|list user=* redir=-1 resp=* lib-name=* lib-ver=*} + } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 watch=0 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|list user=* redir=-1 resp=* lib-name=* lib-ver=* tot-net-in=* tot-net-out=* tot-cmds=*} test {CLIENT LIST with IDs} { set myid [r client id] @@ -17,7 +17,76 @@ start_server {tags {"introspection"}} { test {CLIENT INFO} { r client info - } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 watch=0 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|info user=* redir=-1 resp=* lib-name=* lib-ver=*} + } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 watch=0 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|info user=* redir=-1 resp=* lib-name=* lib-ver=* tot-net-in=* tot-net-out=* tot-cmds=*} + + proc get_field_in_client_info {info field} { + set info [string trim $info] + foreach item [split $info " "] { + set kv [split $item "="] + set k [lindex $kv 0] + if {[string match $field $k]} { + return [lindex $kv 1] + } + } + return "" + } + + proc get_field_in_client_list {id client_list filed} { + set list [split $client_list "\r\n"] + foreach info $list { + if {[string match "id=$id *" $info] } { + return [get_field_in_client_info $info $filed] + } + } + return "" + } + + test {client 
input output and command process statistics} { + set info1 [r client info] + set input1 [get_field_in_client_info $info1 "tot-net-in"] + set output1 [get_field_in_client_info $info1 "tot-net-out"] + set cmd1 [get_field_in_client_info $info1 "tot-cmds"] + set info2 [r client info] + set input2 [get_field_in_client_info $info2 "tot-net-in"] + set output2 [get_field_in_client_info $info2 "tot-net-out"] + set cmd2 [get_field_in_client_info $info2 "tot-cmds"] + assert_equal [expr $input1+26] $input2 + assert {[expr $output1+300] < $output2} + assert_equal [expr $cmd1+1] $cmd2 + # test blocking command + r del mylist + set rd [valkey_deferring_client] + $rd client id + set rd_id [$rd read] + set info_list [r client list] + set input3 [get_field_in_client_list $rd_id $info_list "tot-net-in"] + set output3 [get_field_in_client_list $rd_id $info_list "tot-net-out"] + set cmd3 [get_field_in_client_list $rd_id $info_list "tot-cmds"] + $rd blpop mylist 0 + set info_list [r client list] + set input4 [get_field_in_client_list $rd_id $info_list "tot-net-in"] + set output4 [get_field_in_client_list $rd_id $info_list "tot-net-out"] + set cmd4 [get_field_in_client_list $rd_id $info_list "tot-cmds"] + assert_equal [expr $input3+34] $input4 + assert_equal $output3 $output4 + assert_equal $cmd3 $cmd4 + r lpush mylist a + set info_list [r client list] + set input5 [get_field_in_client_list $rd_id $info_list "tot-net-in"] + set output5 [get_field_in_client_list $rd_id $info_list "tot-net-out"] + set cmd5 [get_field_in_client_list $rd_id $info_list "tot-cmds"] + assert_equal $input4 $input5 + assert_equal [expr $output4+23] $output5 + assert_equal [expr $cmd4+1] $cmd5 + $rd close + # test recursive command + set info [r client info] + set cmd6 [get_field_in_client_info $info "tot-cmds"] + r eval "server.call('ping')" 0 + set info [r client info] + set cmd7 [get_field_in_client_info $info "tot-cmds"] + assert_equal [expr $cmd6+3] $cmd7 + } test {CLIENT KILL with illegal arguments} { assert_error "ERR wrong number of arguments for 'client|kill' command" {r client kill}
From 1b3199e0705817e2cdd8bca6ce3ab3b3567b4573 Mon Sep 17 00:00:00 2001 From: Madelyn Olson Date: Sun, 5 May 2024 22:00:08 -0700 Subject: [PATCH 09/10] Fix unit test issues on address sanitizer and fortify (#437)

This commit does four things:

1. On various images, the linker was not able to correctly load the -flto optimizations from the archive generated for unit tests, and was throwing errors. I was able to solve this by updating the plugin for the fortify test, but was unable to reproduce it on the ASAN tests or find a solution. So I decided to go with a single solution for now, which was to just disable the linker optimizations for those tests. This shouldn't weaken the protections provided by ASAN.

2. The change to remove -flto for some reason caused some odd inlining behavior in the intset test that I wasn't really able to understand. The error was basically that we were doing a 4 byte write, starting at byte offset 8, for the first addition to listpack that was of size 10. Practically this has no effect, since I'm not aware of any allocator that would give us a 10 byte block as opposed to 12 (or more likely 16) bytes. This isn't the correct behavior, since an uninitialized listpack defaults to 16-bit encoding, which should only be writing 2 bytes. I rabbit-holed about 2 hours into this, then gave up and just ignored the warning on the file.

3. Now that address sanitizer was correctly running, it picked up two issues.
A memory leak and uninitialized value, so those were easy to fix. 4. There is also a small change to the fortify to build the test up front instead of later, this is just to be consistent with other tests and has no functional change. Signed-off-by: Madelyn Olson --- .github/workflows/daily.yml | 8 ++++---- src/unit/test_crc64combine.c | 2 +- src/unit/test_intset.c | 13 ++++++++++++- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index bbe4b34b58..7047a3247d 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -101,7 +101,7 @@ jobs: run: | apt-get update && apt-get install -y make gcc-13 update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100 - make CC=gcc SERVER_CFLAGS='-Werror -DSERVER_TEST -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3' + make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -DSERVER_TEST -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3' - name: testprep run: apt-get install -y tcl8.6 tclx procps - name: test @@ -121,7 +121,7 @@ jobs: run: ./src/valkey-server test all --accurate - name: new unit tests if: true && !contains(github.event.inputs.skiptests, 'unittest') - run: make valkey-unit-tests && ./src/valkey-unit-tests --accurate + run: ./src/valkey-unit-tests --accurate test-ubuntu-libc-malloc: runs-on: ubuntu-latest @@ -598,7 +598,7 @@ jobs: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} - name: make - run: make all-with-unit-tests SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror -DDEBUG_ASSERTIONS' + run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-DSERVER_TEST -Werror -DDEBUG_ASSERTIONS' - name: testprep # Work around ASAN issue, see https://github.com/google/sanitizers/issues/1716 run: | @@ -650,7 +650,7 @@ jobs: repository: ${{ env.GITHUB_REPOSITORY }} ref: ${{ env.GITHUB_HEAD_REF }} - name: make - run: make all-with-unit-tests SANITIZER=undefined SERVER_CFLAGS='-DSERVER_TEST -Werror' LUA_DEBUG=yes # we (ab)use this flow to also check Lua C API violations + run: make all-with-unit-tests OPT=-O3 SANITIZER=undefined SERVER_CFLAGS='-DSERVER_TEST -Werror' LUA_DEBUG=yes # we (ab)use this flow to also check Lua C API violations - name: testprep run: | sudo apt-get update diff --git a/src/unit/test_crc64combine.c b/src/unit/test_crc64combine.c index ed5c39d97f..cb1d96f1eb 100644 --- a/src/unit/test_crc64combine.c +++ b/src/unit/test_crc64combine.c @@ -26,7 +26,7 @@ long long _ustime(void) { } static int bench_crc64(unsigned char *data, uint64_t size, long long passes, uint64_t check, char *name, int csv) { - uint64_t min = size, hash; + uint64_t min = size, hash = 0; long long original_start = _ustime(), original_end; for (long long i=passes; i > 0; i--) { hash = crc64(0, data, size); diff --git a/src/unit/test_intset.c b/src/unit/test_intset.c index 9e23101dff..8aa6a63928 100644 --- a/src/unit/test_intset.c +++ b/src/unit/test_intset.c @@ -3,6 +3,14 @@ #include "../intset.c" #include "test_help.h" +#if defined(__GNUC__) && __GNUC__ >= 7 +/* Several functions in this file get inlined in such a way that fortify warns there might + * be an out of bounds memory access depending on the intset encoding, but they aren't actually + * reachable because we check the encoding. There are other strategies to fix this, but they + * all require other hacks to prevent the inlining. So for now, just omit the check. 
*/ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#endif static long long usec(void) { struct timeval tv; @@ -135,7 +143,6 @@ int test_intsetUpgradeFromint16Toint64(int argc, char **argv, int flags) { UNUSED(flags); intset *is = intsetNew(); - is = intsetNew(); is = intsetAdd(is,32,NULL); TEST_ASSERT(intrev32ifbe(is->encoding) == INTSET_ENC_INT16); is = intsetAdd(is,4294967295,NULL); @@ -227,3 +234,7 @@ int test_intsetStressAddDelete(int argc, char **argv, int flags) { return 0; } + +#if defined(__GNUC__) && __GNUC__ >= 12 +#pragma GCC diagnostic pop +#endif
From 93f8a19b6f9884f382d24f7650822d99cbb26c6d Mon Sep 17 00:00:00 2001 From: NAM UK KIM Date: Mon, 6 May 2024 16:09:01 +0900 Subject: [PATCH 10/10] Change strlcat function name from redis to valkey (#440)

Updated the strlcat function and macro names (redis_strlcat -> valkey_strlcat). I think the standard strcat function is not safe (https://codeql.github.com/codeql-query-help/cpp/cpp-unsafe-strcat/), so it would be better to keep using a safe function.

Signed-off-by: NAM UK KIM --- src/fmacros.h | 2 +- src/rdb.c | 4 ++-- src/strl.c | 2 +- src/util.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/fmacros.h b/src/fmacros.h index e629d1be2f..aceda1c289 100644 --- a/src/fmacros.h +++ b/src/fmacros.h @@ -65,7 +65,7 @@ #if (__GNUC__ && __GNUC__ >= 4) && !defined __APPLE__ int sprintf(char *str, const char *format, ...) __attribute__((deprecated("please avoid use of unsafe C functions. prefer use of snprintf instead"))); char *strcpy(char *restrict dest, const char *src) __attribute__((deprecated("please avoid use of unsafe C functions. prefer use of valkey_strlcpy instead"))); -char *strcat(char *restrict dest, const char *restrict src) __attribute__((deprecated("please avoid use of unsafe C functions. prefer use of redis_strlcat instead"))); +char *strcat(char *restrict dest, const char *restrict src) __attribute__((deprecated("please avoid use of unsafe C functions. prefer use of valkey_strlcat instead"))); #endif #ifdef __linux__
diff --git a/src/rdb.c b/src/rdb.c index 0bf4513247..206cca6908 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1615,8 +1615,8 @@ void rdbRemoveTempFile(pid_t childpid, int from_signal) { /* Generate temp rdb file name using async-signal safe functions. */ ll2string(pid, sizeof(pid), childpid); valkey_strlcpy(tmpfile, "temp-", sizeof(tmpfile)); - redis_strlcat(tmpfile, pid, sizeof(tmpfile)); - redis_strlcat(tmpfile, ".rdb", sizeof(tmpfile)); + valkey_strlcat(tmpfile, pid, sizeof(tmpfile)); + valkey_strlcat(tmpfile, ".rdb", sizeof(tmpfile)); if (from_signal) { /* bg_unlink is not async-signal-safe, but in this case we don't really
diff --git a/src/strl.c b/src/strl.c index e36faaab64..0453e0608f 100644 --- a/src/strl.c +++ b/src/strl.c @@ -53,7 +53,7 @@ valkey_strlcpy(char *dst, const char *src, size_t dsize) * If retval >= dsize, truncation occurred. */ size_t -redis_strlcat(char *dst, const char *src, size_t dsize) +valkey_strlcat(char *dst, const char *src, size_t dsize) { const char *odst = dst; const char *osrc = src;
diff --git a/src/util.h b/src/util.h index 79fe743f43..ee50e29013 100644 --- a/src/util.h +++ b/src/util.h @@ -98,7 +98,7 @@ int snprintf_async_signal_safe(char *to, size_t n, const char *fmt, ...)
int snprintf_async_signal_safe(char *to, size_t n, const char *fmt, ...); #endif size_t valkey_strlcpy(char *dst, const char *src, size_t dsize); -size_t redis_strlcat(char *dst, const char *src, size_t dsize); +size_t valkey_strlcat(char *dst, const char *src, size_t dsize); #ifdef SERVER_TEST int utilTest(int argc, char **argv, int flags);